From a2d61ac842808af64a697e5f395337f53fc6dac9 Mon Sep 17 00:00:00 2001
From: Tomek Urbaszek
Date: Mon, 12 Oct 2020 14:10:56 +0200
Subject: [PATCH 01/48] Refactor setup.py (#48)

This PR introduces some structure to setup.py by encapsulating some parts
of the logic into functions and creating the main function.
---
 CONTRIBUTING.md       |   2 +-
 docs/source/setup.rst |  13 +-
 setup/setup.py        | 412 ++++++++++++++++++++++--------------------
 3 files changed, 222 insertions(+), 205 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 86ec40be..a28a44fb 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -15,5 +15,5 @@ We also have:

 ## Development installation

-This project requires Python in higher version than 3.3.
+This project requires Python 3.4 or higher.
 More information will come soon!
diff --git a/docs/source/setup.rst b/docs/source/setup.rst
index ad821704..21a25a66 100644
--- a/docs/source/setup.rst
+++ b/docs/source/setup.rst
@@ -108,11 +108,7 @@ following components installed and set up:
   existing databases, but not for new setups). Does not have to be on the
   same machine, but it may help speed up processing.
 - A web server of your choice (Apache HTTP Server, NGINX, lighttp etc)
-- Python 3.4 or newer with the following libraries installed:
-- - elasticsearch
-- - certifi
-- - pyyaml
-- - bcrypt
+- Python 3.4 or newer with the libraries from `setup/requirements.txt` installed
 - Gunicorn for Python 3.x (often called gunicorn3) or mod_wsgi

 ###########################################
@@ -125,10 +121,9 @@ Assuming you wish to install kibble in /var/www/kibble, you would set it up by
 issuing the following:

 - ``git clone https://github.com/apache/kibble.git /var/www/kibble``
-- ``cd /var/www/kibble/setup``
-- ``pip3 install -r requirements.txt``
-- ``python3 setup.py``
-- Enter the configuration parameters the setup process asks for
+- ``cd /var/www/kibble``
+- ``pip install -r setup/requirements.txt``
+- ``python setup/setup.py``

 This will set up the database, the configuration file, and create your
 initial administrator account for the UI. You can later on do additional
diff --git a/setup/setup.py b/setup/setup.py
index f06bfd2f..4a40fae1 100644
--- a/setup/setup.py
+++ b/setup/setup.py
@@ -14,143 +14,84 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-KIBBLE_VERSION = '0.1.0' # ABI/API compat demarcation.
-KIBBLE_DB_VERSION = 2 # Second database revision
-
 import sys
-
-if sys.version_info <= (3, 3):
-    print("This script requires Python 3.4 or higher")
-    sys.exit(-1)
-
 import os
-import getpass
-import subprocess
 import argparse
-import shutil
+import logging
+from getpass import getpass
+
 import yaml
 import bcrypt
 import json
+from elasticsearch import Elasticsearch

-mappings = json.load(open("mappings.json"))
-myyaml = yaml.load(open("kibble.yaml.sample"))
+KIBBLE_VERSION = '0.1.0' # ABI/API compat demarcation.
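+# Written, along with KIBBLE_VERSION above, into the <dbname>_api index (see
+# dbdoc further down) when the indices are created, so the API can check
+# which database revision an installation is running.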
+KIBBLE_DB_VERSION = 2 # Second database revision

-dopip = False
-try:
-    from elasticsearch import Elasticsearch
-    from elasticsearch import VERSION as ES_VERSION
-    ES_MAJOR = ES_VERSION[0]
-except:
-    dopip = True
-
-if dopip and (getpass.getuser() != "root"):
-    print("It looks like you need to install some python modules first")
-    print("Either run this as root to do so, or run: ")
-    print("pip3 install elasticsearch certifi bcrypt")
+if sys.version_info < (3, 4):
+    print("This script requires Python 3.4 or higher")
     sys.exit(-1)
-elif dopip:
-    print("Before we get started, we need to install some modules")
-    print("Hang on!")
-    try:
-        subprocess.check_call(('pip3','install','elasticsearch', 'certifi', 'bcrypt'))
-        from elasticsearch import Elasticsearch
-    except:
-        print("Oh dear, looks like this failed :(")
-        print("Please install elasticsearch and certifi before you try again:")
-        print("pip install elasticsearch certifi")
-        sys.exit(-1)
-

 # Arguments for non-interactive setups like docker
-arg_parser = argparse.ArgumentParser()
-arg_parser.add_argument("-e", "--hostname", help="Pre-defined hostname for ElasticSearch (docker setups)")
-arg_parser.add_argument("-p", "--port", help="Pre-defined port for ES (docker setups)")
-arg_parser.add_argument("-d", "--dbname", help="Pre-defined Database prefix (docker setups)")
-arg_parser.add_argument("-s", "--shards", help="Predefined number of ES shards (docker setups)")
-arg_parser.add_argument("-r", "--replicas", help="Predefined number of replicas for ES (docker setups)")
-arg_parser.add_argument("-m", "--mailhost", help="Pre-defined mail server host (docker setups)")
-arg_parser.add_argument("-a", "--autoadmin", action='store_true', help="Generate generic admin account (docker setups)")
-arg_parser.add_argument("-k", "--skiponexist", action='store_true', help="Skip DB creation if DBs exist (docker setups)")
-args = arg_parser.parse_args()
-
-print("Welcome to the Apache Kibble setup script!")
-print("Let's start by determining some settings...")
-print("")
-
-
-hostname = args.hostname or ""
-port = int(args.port) if args.port else 0
-dbname = args.dbname or ""
-mlserver = args.mailhost or ""
-mldom = ""
-wc = ""
-genname = ""
-wce = False
-shards = int(args.shards) if args.shards else 0
-replicas = int(args.replicas) if args.replicas else -1
-
-while hostname == "":
-    hostname = input("What is the hostname of the ElasticSearch server? [localhost]: ")
-    if hostname == "":
-        print("Using default; localhost")
-        hostname = "localhost"
-while port < 1:
-    try:
-        port = input("What port is ElasticSearch listening on? [9200]: ")
-        if port == "":
-            print("Using default; 9200")
-            port = 9200
-        port = int(port)
-    except ValueError:
-        pass
-
-while dbname == "":
-    dbname = input("What would you like to call the DB index [kibble]: ")
-    if dbname == "":
-        print("Using default; kibble")
-        dbname = "kibble"
-
-while mlserver == "":
-    mlserver = input("What is the hostname of the outgoing mailserver? [localhost:25]: ")
-    if mlserver == "":
-        print("Using default; localhost:25")
-        mlserver = "localhost:25"
-
-while shards < 1:
-    try:
-        shards = input("How many shards for the ElasticSearch index? [5]:")
-        if shards == "":
-            print("Using default; 5")
-            shards = 5
-        shards = int(shards)
-    except ValueError:
-        pass
-
-while replicas < 0:
-    try:
-        replicas = input("How many replicas for each shard? [1]: ")
-        if replicas == "":
-            print("Using default; 1")
-            replicas = 1
-        replicas = int(replicas)
-    except ValueError:
-        pass
-
-adminName = ""
-adminPass = ""
-if args.autoadmin:
-    adminName = "admin@kibble"
-    adminPass = "kibbleAdmin"
-while adminName == "":
-    adminName = input("Enter an email address for the adminstrator account: ")
-while adminPass == "":
-    adminPass = input("Enter a password for the adminstrator account: ")
-
-print("Okay, I got all I need, setting up Kibble...")
+def get_parser():
+    arg_parser = argparse.ArgumentParser()
+    arg_parser.add_argument(
+        "-e", "--hostname",
+        help="Pre-defined hostname for ElasticSearch (docker setups). Default: localhost",
+        default="localhost"
+    )
+    arg_parser.add_argument(
+        "-p", "--port",
+        help="Pre-defined port for ES (docker setups). Default: 9200", default=9200
+    )
+    arg_parser.add_argument(
+        "-d", "--dbname", help="Pre-defined Database prefix (docker setups). Default: kibble", default="kibble"
+    )
+    arg_parser.add_argument(
+        "-s", "--shards", help="Predefined number of ES shards (docker setups). Default: 5", default=5
+    )
+    arg_parser.add_argument(
+        "-r", "--replicas", help="Predefined number of replicas for ES (docker setups). Default: 1", default=1
+    )
+    arg_parser.add_argument(
+        "-m", "--mailhost",
+        help="Pre-defined mail server host (docker setups). Default: localhost:25",
+        default="localhost:25"
+    )
+    arg_parser.add_argument(
+        "-a", "--autoadmin",
+        action='store_true',
+        help="Generate generic admin account (docker setups). Default: False",
+        default=False
+    )
+    arg_parser.add_argument(
+        "-k", "--skiponexist",
+        action='store_true',
+        help="Skip DB creation if DBs exist (docker setups). Default: True", default=True
+    )
+    return arg_parser
+
+
+def create_es_index(
+    hostname: str,
+    port: int,
+    dbname: str,
+    shards: int,
+    replicas: int,
+    admin_name: str,
+    admin_pass: str,
+    skiponexist: bool,
+):
+    """Creates Elasticsearch index used by Kibble"""
+
+    # elasticsearch logs lots of warnings on retries/connection failure
+    logging.getLogger("elasticsearch").setLevel(logging.ERROR)
+
+    mappings_json = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mappings.json")
+    with open(mappings_json, "r") as f:
+        mappings = json.load(f)

-def createIndex():
-    global mappings
     es = Elasticsearch([
         {
             'host': hostname,
@@ -160,19 +101,27 @@ def createIndex():
         }],
         max_retries=5,
         retry_on_timeout=True
-    )
+        )
+
+    es_version = es.info()['version']['number']
+    es6 = int(es_version.split('.')[0]) >= 6
+    es7 = int(es_version.split('.')[0]) >= 7

-    es6 = True if int(es.info()['version']['number'].split('.')[0]) >= 6 else False
-    es7 = True if int(es.info()['version']['number'].split('.')[0]) >= 7 else False
     if not es6:
-        print("New Kibble installations require ElasticSearch 6.x or newer! You appear to be running %s!" % es.info()['version']['number'])
+        print(
+            f"New Kibble installations require ElasticSearch 6.x or newer! "
+            f"You appear to be running {es_version}!"
+        )
         sys.exit(-1)
+
+    # If ES >= 7, _doc is invalid and mapping should be rooted
     if es7:
         mappings['mappings'] = mappings['mappings']['_doc']
+
     # Check if index already exists
-    if es.indices.exists(dbname+"_api"):
-        if args.skiponexist: # Skip this is DB exists and -k added
+    if es.indices.exists(dbname + "_api"):
+        # Skip this if DB exists and -k added
+        if skiponexist:
             print("DB prefix exists, but --skiponexist used, skipping this step.")
             return
         print("Error: ElasticSearch DB prefix '%s' already exists!" % dbname)
@@ -225,29 +174,29 @@ def createIndex():
     ]

     for t in types:
-        iname = "%s_%s" % (dbname, t)
-        print("Creating index " + iname)
-
+        iname = f"{dbname}_{t}"
+        print(f"Creating index {iname}")
+
         settings = {
-            "number_of_shards" :   shards,
-            "number_of_replicas" : replicas
+            "number_of_shards": shards,
+            "number_of_replicas": replicas
         }
-
-
-        res = es.indices.create(index = iname, body = {
-                "mappings" : mappings['mappings'],
-                "settings": settings
-            }
-        )
-
-    print("Indices created! %s " % res)
-
+        es.indices.create(
+            index=iname,
+            body={
+                "mappings": mappings['mappings'],
+                "settings": settings
+            }
+        )
+    print("Indices created!")
+    print()
+
     salt = bcrypt.gensalt()
-    pwd = bcrypt.hashpw(adminPass.encode('utf-8'), salt).decode('ascii')
+    pwd = bcrypt.hashpw(admin_pass.encode('utf-8'), salt).decode('ascii')

     print("Creating administrator account")
     doc = {
-        'email': adminName,                 # Username (email)
-        'password': pwd,                    # Hashed password
+        'email': admin_name,                # Username (email)
+        'password': pwd,                    # Hashed password
         'displayName': "Administrator",     # Display Name
         'organisations': [],                # Orgs user belongs to (default is none)
         'ownerships': [],                   # Orgs user owns (default is none)
@@ -259,60 +208,133 @@ def createIndex():
         'apiversion': KIBBLE_VERSION,       # Log current API version
         'dbversion': KIBBLE_DB_VERSION      # Log the database revision we accept (might change!)
     }
-    es.index(index=dbname+'_useraccount', doc_type = '_doc', id = adminName, body = doc)
-    es.index(index=dbname+'_api', doc_type = '_doc', id = 'current', body = dbdoc)
+    es.index(index=dbname+'_useraccount', doc_type='_doc', id=admin_name, body=doc)
+    es.index(index=dbname+'_api', doc_type='_doc', id='current', body=dbdoc)
     print("Account created!")

-try:
-    import logging
-    # elasticsearch logs lots of warnings on retries/connection failure
-    logging.getLogger("elasticsearch").setLevel(logging.ERROR)
-    createIndex()
-
-
-except Exception as e:
-    print("Index creation failed: %s" % e)
-    sys.exit(1)

-kibble_yaml = '../api/yaml/kibble.yaml'
+def get_kibble_yaml() -> str:
+    """Resolve path to kibble config yaml"""
+    kibble_yaml = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)),
+        os.pardir,
+        "api",
+        "yaml",
+        "kibble.yaml"
+    )
+    if os.path.exists(kibble_yaml):
+        print(f"{kibble_yaml} already exists! Writing to {kibble_yaml}.tmp instead")
+        kibble_yaml = kibble_yaml + ".tmp"
+    return kibble_yaml

-if os.path.exists(kibble_yaml):
-    print("%s already exists! Writing to %s.tmp instead" % (kibble_yaml, kibble_yaml))
-    kibble_yaml = kibble_yaml + ".tmp"
-
-print("Writing Kibble config (%s)" % kibble_yaml)
+def save_config(
+    mlserver: str,
+    hostname: str,
+    port: int,
+    dbname: str,
+):
+    """Save kibble config to yaml file"""
+    if ":" in mlserver:
+        try:
+            mailhost, mailport = mlserver.split(":")
+        except ValueError:
+            raise ValueError("mailhost argument must be in the form of `host:port` or `host`")
+    else:
+        mailhost = mlserver
+        mailport = 25

-m = mlserver.split(':')
-if len(m) == 1:
-    m.append(25)
-
-myconfig = {
-    'api': {
-        'version': KIBBLE_VERSION,
-        'database': KIBBLE_DB_VERSION
-    },
-    'elasticsearch': {
-        'host': hostname,
-        'port': port,
-        'ssl': False,
-        'dbname': dbname
-    },
-    'mail': {
-        'mailhost': m[0],
-        'mailport': m[1],
-        'sender': 'Kibble '
-    },
-    'accounts': {
-        'allowSignup': True,
-        'verify': True
+    config = {
+        'api': {
+            'version': KIBBLE_VERSION,
+            'database': KIBBLE_DB_VERSION
+        },
+        'elasticsearch': {
+            'host': hostname,
+            'port': port,
+            'ssl': False,
+            'dbname': dbname
+        },
+        'mail': {
+            'mailhost': mailhost,
+            'mailport': int(mailport),
+            'sender': 'Kibble '
+        },
+        'accounts': {
+            'allowSignup': True,
+            'verify': True
+        }
     }
-}

-with open(kibble_yaml, "w") as f:
-    f.write(yaml.dump(myconfig, default_flow_style = False))
-    f.close()
+    kibble_yaml = get_kibble_yaml()
+    print(f"Writing Kibble config to {kibble_yaml}")
+    with open(kibble_yaml, "w") as f:
+        f.write(yaml.dump(config, default_flow_style = False))
+        f.close()
+
+
+def get_user_input(msg: str, secure: bool = False):
+    value = None
+    while not value:
+        value = getpass(msg) if secure else input(msg)
+    return value
+
+
+def print_configuration(args):
+    print("Configuring Apache Kibble elasticsearch instance with the following arguments:")
+    print(f"- hostname: {args.hostname}")
+    print(f"- port: {int(args.port)}")
+    print(f"- dbname: {args.dbname}")
+    print(f"- shards: {int(args.shards)}")
+    print(f"- replicas: {int(args.replicas)}")
+    print()
+
+
+def main():
+    """
+    The main Kibble setup logic. Using user input we create:
+    - Elasticsearch indexes used by the Apache Kibble app
+    - Configuration yaml file
+    """
+    parser = get_parser()
+    args = parser.parse_args()
+
+    print("Welcome to the Apache Kibble setup script!")
+    print_configuration(args)
+
+    admin_name = "admin@kibble"
+    admin_pass = "kibbleAdmin"
+    if not args.autoadmin:
+        admin_name = get_user_input("Enter an email address for the administrator account:")
+        admin_pass = get_user_input("Enter a password for the administrator account:", secure=True)
+
+    # Create Elasticsearch index
+    try:
+        create_es_index(
+            hostname=args.hostname,
+            port=int(args.port),
+            dbname=args.dbname,
+            shards=int(args.shards),
+            replicas=int(args.replicas),
+            admin_name=admin_name,
+            admin_pass=admin_pass,
+            skiponexist=args.skiponexist,
+        )
+    except Exception as e:
+        print("Index creation failed: %s" % e)
+        sys.exit(1)
+    print()
+
+    # Create Kibble configuration file
+    save_config(
+        mlserver=args.mailhost,
+        hostname=args.hostname,
+        port=int(args.port),
+        dbname=args.dbname,
+    )
+    print()
+    print("All done, Kibble should...work now :)")

-
-print("All done, Kibble should...work now :)")
+if __name__ == '__main__':
+    main()

From b371d80a39d90fa56eeda9edbc46c7cf58268716 Mon Sep 17 00:00:00 2001
From: Tomek Urbaszek
Date: Fri, 16 Oct 2020 11:18:07 +0200
Subject: [PATCH 02/48] Introduce github actions for CI (#47)

* Introduce github actions for CI

* fixup! Introduce github actions for CI
---
 .github/workflows/ci.yaml | 30 ++++++++++++++++++++++++++++++
 .pre-commit-config.yaml   |  2 ++
 CONTRIBUTING.md           | 28 +++++++++++++++++++++++++++-
 3 files changed, 59 insertions(+), 1 deletion(-)
 create mode 100644 .github/workflows/ci.yaml

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 00000000..f7f27c0f
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+---
+name: CI
+on:
+  - push
+  - pull_request
+
+jobs:
+  statics:
+    name: Static Checks
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
+      - uses: pre-commit/action@v1.0.1
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 07fae567..3dd6d750 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -28,4 +28,6 @@ repos:
     hooks:
       - id: check-yaml
       - id: end-of-file-fixer
+        exclude: ^ui/vendors/.*$
      - id: trailing-whitespace
+        exclude: ^ui/vendors/.*$
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a28a44fb..2c3351a7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
 # Contributing to Kibble #

-## Community
+## Community

 The main development and design discussion happens on our mailing lists.
 We have a list specifically for development, and one for future user
 questions and feedback.
@@ -17,3 +17,29 @@ We also have:

 This project requires Python 3.4 or higher.
 More information will come soon!
+
+## Code Quality
+
+The Apache Kibble project uses [pre-commit](https://pre-commit.com) to ensure the quality of the code.
+We encourage you to use pre-commit, but it is not required in order to contribute. Every change is checked
+on CI, and if it does not pass the tests it cannot be accepted. If you want to run the checks locally,
+install Python 3.6 or newer and run:
+```bash
+pip install pre-commit
+# or
+brew install pre-commit
+```
+For more installation options visit the [pre-commit website](https://pre-commit.com).
+ +To turn on pre-commit checks for commit operations in git, run: +```bash +pre-commit install +``` +To run all checks on your staged files, run: +```bash +pre-commit run +``` +To run all checks on all files, run: +```bash +pre-commit run --all-files +``` From 2002a53fa3e1e888154681061173f6f976ae63ba Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Fri, 16 Oct 2020 12:26:39 +0200 Subject: [PATCH 03/48] Apply pre-commit checks to all files and make CI green (#58) --- .github/ISSUE_TEMPLATE/feature_request.md | 2 +- CODE_OF_CONDUCT.md | 4 +- NOTICE | 3 +- README.md | 2 +- api/handler.py | 16 +- api/pages/__init__.py | 4 +- api/pages/account.py | 33 +- api/pages/bio/bio.py | 34 +- api/pages/bio/newtimers.py | 96 +-- api/pages/bio/trends.py | 80 +- api/pages/ci/queue.py | 32 +- api/pages/ci/status.py | 26 +- api/pages/ci/top-buildcount.py | 24 +- api/pages/ci/top-buildtime.py | 26 +- api/pages/code/changes.py | 30 +- api/pages/code/commits.py | 32 +- api/pages/code/committers.py | 36 +- api/pages/code/evolution.py | 30 +- api/pages/code/pony-timeseries.py | 42 +- api/pages/code/pony.py | 56 +- api/pages/code/punchcard.py | 30 +- api/pages/code/relationships.py | 46 +- api/pages/code/retention.py | 50 +- api/pages/code/sloc.py | 22 +- api/pages/code/top-commits.py | 34 +- api/pages/code/top-sloc.py | 24 +- api/pages/code/trends.py | 61 +- api/pages/filters.py | 6 +- api/pages/forum/actors.py | 38 +- api/pages/forum/creators.py | 26 +- api/pages/forum/issues.py | 46 +- api/pages/forum/responders.py | 28 +- api/pages/forum/top-count.py | 30 +- api/pages/forum/top.py | 26 +- api/pages/forum/trends.py | 68 +- api/pages/issue/actors.py | 38 +- api/pages/issue/age.py | 26 +- api/pages/issue/closers.py | 28 +- api/pages/issue/issues.py | 46 +- api/pages/issue/openers.py | 26 +- api/pages/issue/pony-timeseries.py | 42 +- api/pages/issue/relationships.py | 48 +- api/pages/issue/retention.py | 52 +- api/pages/issue/top-count.py | 30 +- api/pages/issue/top.py | 26 +- api/pages/issue/trends.py | 68 +- api/pages/mail/map.py | 50 +- api/pages/mail/mood-timeseries.py | 36 +- api/pages/mail/mood.py | 50 +- api/pages/mail/pony-timeseries.py | 36 +- api/pages/mail/relationships.py | 42 +- api/pages/mail/retention.py | 50 +- api/pages/mail/timeseries-single.py | 26 +- api/pages/mail/timeseries.py | 28 +- api/pages/mail/top-authors.py | 30 +- api/pages/mail/top-topics.py | 26 +- api/pages/mail/trends.py | 60 +- api/pages/org/contributors.py | 26 +- api/pages/org/list.py | 12 +- api/pages/org/members.py | 42 +- api/pages/org/sourcetypes.py | 8 +- api/pages/org/trends.py | 38 +- api/pages/session.py | 27 +- api/pages/sources.py | 38 +- api/pages/verify.py | 9 +- api/pages/views.py | 30 +- api/pages/widgets.py | 8 +- api/plugins/database.py | 9 +- api/plugins/openapi.py | 48 +- api/plugins/session.py | 17 +- api/yaml/openapi/combine.py | 6 +- .../components/schemas/OrgMembers.yaml | 2 - .../components/schemas/Organisation.yaml | 1 - .../components/schemas/PhraseList.yaml | 1 - .../components/schemas/SourceListAdd.yaml | 2 +- .../components/schemas/defaultWidgetArgs.yaml | 1 - api/yaml/sourcetypes.yaml | 8 +- docs/Makefile | 2 +- docs/source/conf.py | 3 - docs/source/managing.rst | 2 +- docs/source/usecases.rst | 38 +- setup/kibble.yaml.sample | 3 +- setup/makeaccount.py | 1 - setup/setup.py | 2 +- ui/apidoc.html | 762 +++++++++--------- ui/contributors.html | 6 +- ui/css/c3.css | 8 +- ui/css/daterangepicker.css | 1 - ui/css/kibble.min.css | 8 +- ui/dashboard.html | 8 +- ui/engagement.html | 6 +- ui/index.html | 6 
+- ui/js/app.js | 8 +- ui/js/c3.min.js | 2 +- ui/js/coffee/account.coffee | 5 +- ui/js/coffee/charts_gauge.coffee | 9 +- ui/js/coffee/charts_linechart.coffee | 2 +- ui/js/coffee/charts_linked_map.coffee | 76 +- ui/js/coffee/charts_punchcard.coffee | 23 +- ui/js/coffee/charts_radar.coffee | 57 +- ui/js/coffee/charts_wrapper.coffee | 65 +- ui/js/coffee/colors.coffee | 12 +- ui/js/coffee/combine.sh | 1 - ui/js/coffee/datepicker.coffee | 11 +- ui/js/coffee/error_modal.coffee | 3 +- ui/js/coffee/explorer.coffee | 130 ++- ui/js/coffee/kibble_account.coffee | 14 +- ui/js/coffee/kibble_organisation.coffee | 36 +- ui/js/coffee/misc.coffee | 22 +- ui/js/coffee/pageloader.coffee | 22 +- ui/js/coffee/phonebook.coffee | 7 +- ui/js/coffee/sources.coffee | 44 +- ui/js/coffee/widget.coffee | 69 +- ui/js/coffee/widget_admin.coffee | 33 +- ui/js/coffee/widget_affiliations.coffee | 9 +- ui/js/coffee/widget_bio.coffee | 8 +- ui/js/coffee/widget_comstat.coffee | 42 +- ui/js/coffee/widget_donut.coffee | 24 +- ui/js/coffee/widget_factors.coffee | 6 +- ui/js/coffee/widget_jsondump.coffee | 1 - ui/js/coffee/widget_map.coffee | 11 +- ui/js/coffee/widget_messages.coffee | 43 +- ui/js/coffee/widget_mvp.coffee | 12 +- ui/js/coffee/widget_paragraph.coffee | 3 - ui/js/coffee/widget_preferences.coffee | 9 +- ui/js/coffee/widget_publisher.coffee | 7 +- ui/js/coffee/widget_punchcard.coffee | 6 +- ui/js/coffee/widget_radar.coffee | 16 +- ui/js/coffee/widget_relation.coffee | 14 +- ui/js/coffee/widget_report.coffee | 31 +- ui/js/coffee/widget_top5.coffee | 9 +- ui/js/coffee/widget_treemap.coffee | 29 +- ui/js/coffee/widget_trend.coffee | 11 +- ui/js/coffee/widget_views.coffee | 30 +- ui/js/core.js | 2 +- ui/js/d3.min.js | 2 +- ui/js/datepicker/daterangepicker.js | 2 +- ui/js/moment/moment.min.js | 2 +- ui/login.html | 16 +- ui/organisations.html | 8 +- ui/relationships.html | 6 +- 141 files changed, 2069 insertions(+), 2137 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 753f97da..bf1a4b79 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,6 +1,6 @@ --- name: Feature request -about: Idea or feature request +about: Idea or feature request title: '' labels: 'kind:feature' assignees: '' diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 5fd7277c..d5c66bb2 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,7 +1,7 @@ # Code of Conduct -The Apache Kibble project follows the +The Apache Kibble project follows the [Apache Software Foundation code of conduct](https://www.apache.org/foundation/policies/conduct.html). -If you observe behavior that violates those rules please follow the +If you observe behavior that violates those rules please follow the [ASF reporting guidelines](https://www.apache.org/foundation/policies/conduct#reporting-guidelines). diff --git a/NOTICE b/NOTICE index 790683a8..66e7dc12 100644 --- a/NOTICE +++ b/NOTICE @@ -119,7 +119,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
Metis Dashboard (MIT License) ------------------------------------------------------------------------ -Copyright (c) 2015 onokumus +Copyright (c) 2015 onokumus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to @@ -186,4 +186,3 @@ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/README.md b/README.md index 7efe0455..77d5116e 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ Apache Kibble is a tool to collect, aggregate and visualize data about any softw for the scanners to connect to, and provides the overall management of sources as well as the visualizations and API end points. - **Kibble scanners** ([kibble-scanners](https://github.com/apache/kibble-scanners)) - a collection of - scanning applications each designed to work with a specific type of resource (git repo, mailing list, + scanning applications each designed to work with a specific type of resource (git repo, mailing list, JIRA, etc) and push compiled data objects to the Kibble Server. ## Documentation diff --git a/api/handler.py b/api/handler.py index d767e692..41663a53 100644 --- a/api/handler.py +++ b/api/handler.py @@ -56,7 +56,7 @@ class KibbleHTTPError(Exception): def __init__(self, code, message): self.code = code self.message = message - + class KibbleAPIWrapper: """ @@ -67,7 +67,7 @@ def __init__(self, path, func): self.API = KibbleOpenAPI self.path = path self.exception = KibbleHTTPError - + def __call__(self, environ, start_response, session): """Run the function, return response OR return stacktrace""" response = None @@ -90,7 +90,7 @@ def __call__(self, environ, start_response, session): "reason": "Invalid JSON: %s" % err }) return - + # Validate URL against OpenAPI specs try: self.API.validate(environ['REQUEST_METHOD'], self.path, formdata) @@ -102,7 +102,7 @@ def __call__(self, environ, start_response, session): "reason": err.message }) return - + # Call page with env, SR and form data try: response = self.func(self, environ, formdata, session) @@ -124,7 +124,7 @@ def __call__(self, environ, start_response, session): "reason": err.message }, indent = 4) + "\n" return - + except: err_type, err_value, tb = sys.exc_info() traceback_output = ['API traceback:'] @@ -140,8 +140,8 @@ def __call__(self, environ, start_response, session): "code": "500", "reason": '\n'.join(traceback_output) }) - - + + def fourohfour(environ, start_response): """A very simple 404 handler""" start_response("404 Not Found", [ @@ -181,7 +181,7 @@ def application(environ, start_response): elif isinstance(bucket, bytes): yield bucket return - + for bucket in fourohfour(environ, start_response): yield bytes(bucket, encoding = 'utf-8') diff --git a/api/pages/__init__.py b/api/pages/__init__.py index 67a30574..1f8ef384 100644 --- a/api/pages/__init__.py +++ b/api/pages/__init__.py @@ -42,5 +42,5 @@ def loadPage(path): xp = p.replace('.', '/') print("Loading endpoint pages.%s as %s" % (p, xp)) handlers[xp] = importlib.import_module("pages.%s" % p) - -loadPage(rootpath) \ No newline at end of file + +loadPage(rootpath) diff --git a/api/pages/account.py b/api/pages/account.py index 48577735..c3d700bd 100644 --- a/api/pages/account.py +++ 
b/api/pages/account.py @@ -85,7 +85,7 @@ # $ref: '#/components/schemas/Error' # description: unexpected error # summary: Create a new account -# +# ######################################################################## @@ -125,7 +125,7 @@ def sendCode(session, addr, code): s.quit() def run(API, environ, indata, session): - + method = environ['REQUEST_METHOD'] # Add a new account?? @@ -133,32 +133,32 @@ def run(API, environ, indata, session): u = indata['email'] p = indata['password'] d = indata['displayname'] - + # Are new accounts allowed? (admin can always make accounts, of course) if not session.config['accounts'].get('allowSignup', False): if not (session.user and session.user['level'] == 'admin'): raise API.exception(403, "New account requests have been administratively disabled.") - + # Check if we already have that username in use if session.DB.ES.exists(index=session.DB.dbname, doc_type='useraccount', id = u): raise API.exception(403, "Username already in use") - + # We require a username, displayName password of at least 3 chars each if len(p) < 3 or len(u) < 3 or len(d) < 3: raise API.exception(400, "Username, display-name and password must each be at elast 3 characters long.") - + # We loosely check that the email is an email if not re.match(r"^\S+@\S+\.\S+$", u): raise API.exception(400, "Invalid email address presented.") - + # Okay, let's make an account...I guess salt = bcrypt.gensalt() pwd = bcrypt.hashpw(p.encode('utf-8'), salt).decode('ascii') - + # Verification code, if needed vsalt = bcrypt.gensalt() vcode = hashlib.sha1(vsalt).hexdigest() - + # Auto-verify unless verification is enabled. # This is so previously unverified accounts don'thave to verify # if we later turn verification on. @@ -167,7 +167,7 @@ def run(API, environ, indata, session): verified = False sendCode(session, u, vcode) # Send verification email # If verification email fails, skip account creation. - + doc = { 'email': u, # Username (email) 'password': pwd, # Hashed password @@ -179,24 +179,24 @@ def run(API, environ, indata, session): 'vcode': vcode, # Verification code 'userlevel': "user" # User level (user/admin) } - - + + # If we have auto-invite on, check if there are orgs to invite to if 'autoInvite' in session.config['accounts']: dom = u.split('@')[-1].lower() for ai in session.config['accounts']['autoInvite']: if ai['domain'] == dom: doc['organisations'].append(ai['organisation']) - + session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = u, body = doc) yield json.dumps({"message": "Account created!", "verified": verified}) return - + # We need to be logged in for the rest of this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - - + + # Patch (edit) an account if method == "PATCH": userid = session.user['email'] @@ -217,4 +217,3 @@ def run(API, environ, indata, session): session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = userid, body = udoc) yield json.dumps({"message": "Account updated!"}) return - \ No newline at end of file diff --git a/api/pages/bio/bio.py b/api/pages/bio/bio.py index 0f43f9a3..c3aa5338 100644 --- a/api/pages/bio/bio.py +++ b/api/pages/bio/bio.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows some facts about a contributor -# +# ######################################################################## @@ -72,30 +72,30 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! 
if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dOrg = session.user['defaultOrganisation'] or "apache" - + pid = hashlib.sha1( ("%s%s" % (dOrg, indata.get('email', '???'))).encode('ascii', errors='replace')).hexdigest() person = {} if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id = pid): person = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id = pid)['_source'] else: raise API.exception(404, "No such biography!") - + query = { 'query': { 'bool': { @@ -125,8 +125,8 @@ def run(API, environ, indata, session): {'term': {codeKey: indata.get('email')}}, ] query['query']['bool']['minimum_should_match'] = 1 - - + + # FIRST EMAIL res = session.DB.ES.search( index=session.DB.dbname, @@ -136,7 +136,7 @@ def run(API, environ, indata, session): firstEmail = None if res['hits']['hits']: firstEmail = res['hits']['hits'][0]['_source']['ts'] - + # FIRST COMMIT res = session.DB.ES.search( index=session.DB.dbname, @@ -146,7 +146,7 @@ def run(API, environ, indata, session): firstCommit = None if res['hits']['hits']: firstCommit = res['hits']['hits'][0]['_source']['ts'] - + # FIRST AUTHORSHIP query['query']['bool']['should'][3] = {'term': {'author_email': indata.get('email')}} res = session.DB.ES.search( @@ -157,8 +157,8 @@ def run(API, environ, indata, session): firstAuthor = None if res['hits']['hits']: firstAuthor = res['hits']['hits'][0]['_source']['ts'] - - + + # COUNT EMAIL, CODE, LINES CHANGED del query['sort'] del query['size'] @@ -167,13 +167,13 @@ def run(API, environ, indata, session): doc_type="email", body = query )['count'] - + no_commits = session.DB.ES.count( index=session.DB.dbname, doc_type="code_commit", body = query )['count'] - + JSON_OUT = { 'found': True, 'bio': { diff --git a/api/pages/bio/newtimers.py b/api/pages/bio/newtimers.py index 12245f16..8dd4dc20 100644 --- a/api/pages/bio/newtimers.py +++ b/api/pages/bio/newtimers.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows some facts about a contributor -# +# ######################################################################## @@ -75,7 +75,7 @@ def find_earlier(session, query, when, who, which, where, doctype, dOrg): """Find earlier document pertaining to this user. return True if found""" if 'aggs' in query: del query['aggs'] - + rangeQuery = {'range': { which: { @@ -84,7 +84,7 @@ def find_earlier(session, query, when, who, which, where, doctype, dOrg): } } } - + query['query']['bool']['must'] = [ rangeQuery, { @@ -96,12 +96,12 @@ def find_earlier(session, query, when, who, which, where, doctype, dOrg): 'term': { where: who } - + } ] query['size'] = 1 query['sort'] = [{ which: 'asc' }] - + res = session.DB.ES.search( index=session.DB.dbname, doc_type=doctype, @@ -115,40 +115,40 @@ def find_earlier(session, query, when, who, which, where, doctype, dOrg): return [-1, None] else: return [-1, None] - + def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dOrg = session.user['defaultOrganisation'] or "apache" - - + + # Keep track of all contributors, and newcomers contributors = [] newcomers = {} - + #################################################################### # Start by grabbing all contributors this period via terms agg # #################################################################### dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - - - - + + + + ############################ # CODE NEWTIMERS # ############################ @@ -161,7 +161,7 @@ def run(API, environ, indata, session): } } } - + query = { 'query': { 'bool': { @@ -176,46 +176,46 @@ def run(API, environ, indata, session): } } } - + query['aggs'] = { 'by_committer': { 'terms': { 'field': 'committer_email', 'size': 500 - } + } }, 'by_author': { 'terms': { 'field': 'author_email', 'size': 500 - } + } } } - + # Source-specific or view-specific?? if indata.get('source'): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - - + + res = session.DB.ES.search( index=session.DB.dbname, doc_type="code_commit", body = query ) - + code_contributors = [] for bucket in res['aggregations']['by_committer']['buckets']: email = bucket['key'] if email not in code_contributors: code_contributors.append(email) - + for bucket in res['aggregations']['by_author']['buckets']: email = bucket['key'] if email not in code_contributors: code_contributors.append(email) - + # Now, for each contributor, find if they have done anything before for email in code_contributors: ea = find_earlier(session, query, dateFrom, email, 'ts', 'author_email', 'code_commit', dOrg) @@ -227,9 +227,9 @@ def run(API, environ, indata, session): newcomers[email] = { 'code': earliest } - - - + + + ############################ # ISSUE NEWTIMERS # ############################ @@ -242,7 +242,7 @@ def run(API, environ, indata, session): } } } - + query = { 'query': { 'bool': { @@ -257,46 +257,46 @@ def run(API, environ, indata, session): } } } - + query['aggs'] = { 'by_creator': { 'terms': { 'field': 'issueCreator', 'size': 500 - } + } }, 'by_closer': { 'terms': { 'field': 'issueCloser', 'size': 500 - } + } } } - + # Source-specific or view-specific?? 
if indata.get('source'): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - - + + res = session.DB.ES.search( index=session.DB.dbname, doc_type="issue", body = query ) - + issue_contributors = [] for bucket in res['aggregations']['by_creator']['buckets']: email = bucket['key'] if email not in issue_contributors: issue_contributors.append(email) - + for bucket in res['aggregations']['by_closer']['buckets']: email = bucket['key'] if email not in issue_contributors: issue_contributors.append(email) - + # Now, for each contributor, find if they have done anything before for email in issue_contributors: ecr = find_earlier(session, query, dateFrom, email, 'created', 'issueCreator', 'issue', dOrg) @@ -307,13 +307,13 @@ def run(API, environ, indata, session): earliest = ecl newcomers[email] = newcomers.get(email, {}) newcomers[email]['issue'] = earliest - + email_contributors = [] - + ################################ # For each newtimer, get a bio # ################################ - + for email in newcomers: pid = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('ascii', errors='replace')).hexdigest() person = {} @@ -321,11 +321,11 @@ def run(API, environ, indata, session): person = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id = pid)['_source'] person['md5'] = hashlib.md5(person['email'].encode('utf-8')).hexdigest() # gravatar needed for UI! newcomers[email]['bio'] = person - + newcomers_code = [] newcomers_issues = [] newcomers_email = [] - + # Count newcomers in each category (TODO: put this elsewhere earlier) for email, entry in newcomers.items(): if 'code' in entry: @@ -334,7 +334,7 @@ def run(API, environ, indata, session): newcomers_issues.append(email) if 'email' in entry: newcomers_email.append(email) - + JSON_OUT = { 'okay': True, 'stats': { diff --git a/api/pages/bio/trends.py b/api/pages/bio/trends.py index 776779e9..7e5e92b7 100644 --- a/api/pages/bio/trends.py +++ b/api/pages/bio/trends.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows a quick trend summary of the past 6 months for a contributor -# +# ######################################################################## @@ -71,36 +71,36 @@ import time def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - + + dOrg = session.user['defaultOrganisation'] or "apache" - + #################################################################### # We start by doing all the queries for THIS period. # # Then we reset the query, and change date to yonder-->from # # and rerun the same queries. 
# #################################################################### - + rangeKey = 'created' rangeQuery = {'range': { @@ -139,8 +139,8 @@ def run(API, environ, indata, session): {'term': {codeKey: indata.get('email')}}, ] query['query']['bool']['minimum_should_match'] = 1 - - + + # ISSUES CREATED res = session.DB.ES.count( index=session.DB.dbname, @@ -148,8 +148,8 @@ def run(API, environ, indata, session): body = query ) no_issues_created = res['count'] - - + + # ISSUES CLOSED rangeKey = "closed" query['query']['bool']['must'][0] = {'range': @@ -160,15 +160,15 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.count( index=session.DB.dbname, doc_type="issue", body = query ) no_issues_closed = res['count'] - - + + # EMAIL SENT rangeKey = "ts" query['query']['bool']['must'][0] = {'range': @@ -179,14 +179,14 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.count( index=session.DB.dbname, doc_type="email", body = query ) no_email_sent = res['count'] - + # COMMITS MADE rangeKey = "ts" query['query']['bool']['must'][0] = {'range': @@ -197,20 +197,20 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.count( index=session.DB.dbname, doc_type="code_commit", body = query ) no_commits = res['count'] - - - + + + #################################################################### # Change to PRIOR SPAN # #################################################################### - + # ISSUES OPENED rangeKey = "created" query['query']['bool']['must'][0] = {'range': @@ -221,16 +221,16 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.count( index=session.DB.dbname, doc_type="issue", body = query ) no_issues_created_before = res['count'] - - - + + + # ISSUES CLOSED rangeKey = "closed" query['query']['bool']['must'][0] = {'range': @@ -241,15 +241,15 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.count( index=session.DB.dbname, doc_type="issue", body = query ) no_issues_closed_before = res['count'] - - + + # EMAIL SENT rangeKey = "ts" query['query']['bool']['must'][0] = {'range': @@ -260,15 +260,15 @@ def run(API, environ, indata, session): } } } - - + + res = session.DB.ES.count( index=session.DB.dbname, doc_type="email", body = query ) no_email_sent_before = res['count'] - + # CODE COMMITS rangeKey = "ts" query['query']['bool']['must'][0] = {'range': @@ -279,16 +279,16 @@ def run(API, environ, indata, session): } } } - - + + res = session.DB.ES.count( index=session.DB.dbname, doc_type="code_commit", body = query ) no_commits_before = res['count'] - - + + trends = { "created": { 'before': no_issues_created_before, @@ -311,7 +311,7 @@ def run(API, environ, indata, session): 'title': "Commits this period" } } - + JSON_OUT = { 'trends': trends, 'okay': True, diff --git a/api/pages/ci/queue.py b/api/pages/ci/queue.py index 2ef9f729..bba2f65d 100644 --- a/api/pages/ci/queue.py +++ b/api/pages/ci/queue.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows CI queue over time -# +# ######################################################################## @@ -70,29 +70,29 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + # We only want build sources, so we can sum up later. viewList = session.subType(['jenkins', 'travis', 'buildbot'], viewList) - + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -120,13 +120,13 @@ def run(API, environ, indata, session): # Source-specific or view-specific?? if indata.get('source'): viewList = [indata.get('source')] - + query['query']['bool']['must'].append({'term': {'sourceID': 'x'}}) - + timeseries = [] for source in viewList: query['query']['bool']['must'][2] = {'term': {'sourceID': source}} - + # Get queue stats query['aggs'] = { 'timeseries': { @@ -175,7 +175,7 @@ def run(API, environ, indata, session): bucket['wait']['value'] = bucket['wait'].get('value', 0) or 0 if bucket['doc_count'] == 0: continue - + found = False for t in timeseries: if t['date'] == ts: @@ -192,11 +192,11 @@ def run(API, environ, indata, session): 'average wait (hours)': bucket['wait']['value'], 'builders': 1, }) - + for t in timeseries: t['average wait (hours)'] = int(t['average wait (hours)']/360)/10.0 del t['builders'] - + JSON_OUT = { 'widgetType': { 'chartType': 'line', # Recommendation for the UI diff --git a/api/pages/ci/status.py b/api/pages/ci/status.py index a1a8aac8..2891a791 100644 --- a/api/pages/ci/status.py +++ b/api/pages/ci/status.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows CI queue over time -# +# ######################################################################## @@ -70,27 +70,27 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -120,7 +120,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get queue stats query['aggs'] = { 'timeseries': { @@ -158,7 +158,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + timeseries = [] for bucket in res['aggregations']['timeseries']['buckets']: if bucket['doc_count'] == 0: @@ -169,7 +169,7 @@ def run(API, environ, indata, session): 'builds blocked': bucket['blocked']['value'], 'builds stuck': bucket['stuck']['value'] }) - + JSON_OUT = { 'widgetType': { 'chartType': 'bar' # Recommendation for the UI diff --git a/api/pages/ci/top-buildcount.py b/api/pages/ci/top-buildcount.py index aa704058..d688346e 100644 --- a/api/pages/ci/top-buildcount.py +++ b/api/pages/ci/top-buildcount.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows top 25 jobs by total builds done. Essentially buildtime, tweaked -# +# ######################################################################## @@ -72,23 +72,23 @@ import re def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -118,7 +118,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + query['aggs'] = { 'by_job': { 'terms': { @@ -146,14 +146,14 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="ci_build", size = 0, body = query ) - + jobs = [] for doc in res['aggregations']['by_job']['buckets']: job = doc['key'] @@ -162,12 +162,12 @@ def run(API, environ, indata, session): ci = doc['ci']['buckets'][0]['key'] jobname = doc['name']['buckets'][0]['key'] jobs.append([builds, duration, jobname, ci]) - + topjobs = sorted(jobs, key = lambda x: int(x[0]), reverse = True) tophash = {} for v in topjobs: tophash["%s (%s)" % (v[2], v[3])] = v[0] - + JSON_OUT = { 'counts': tophash, 'okay': True, diff --git a/api/pages/ci/top-buildtime.py b/api/pages/ci/top-buildtime.py index 6aded754..46fafca9 100644 --- a/api/pages/ci/top-buildtime.py +++ b/api/pages/ci/top-buildtime.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows top 25 jobs by total build time spent -# +# ######################################################################## @@ -72,23 +72,23 @@ import re def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -118,7 +118,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + query['aggs'] = { 'by_job': { 'terms': { @@ -146,14 +146,14 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="ci_build", size = 0, body = query ) - + jobs = [] for doc in res['aggregations']['by_job']['buckets']: job = doc['key'] @@ -162,7 +162,7 @@ def run(API, environ, indata, session): ci = doc['ci']['buckets'][0]['key'] jobname = doc['name']['buckets'][0]['key'] jobs.append([builds, duration, jobname, ci]) - + topjobs = sorted(jobs, key = lambda x: int(x[1]), reverse = True) top = topjobs[0:24] if len(topjobs) > 25: @@ -170,11 +170,11 @@ def run(API, environ, indata, session): for repo in topjobs[24:]: count += repo[1] top.append([1, count, "Other jobs", '??']) - + tophash = {} for v in top: tophash["%s (%s)" % (v[2], v[3])] = int((v[1]/360000))/10 - + JSON_OUT = { 'counts': tophash, 'okay': True, diff --git a/api/pages/code/changes.py b/api/pages/code/changes.py index c6233d4f..72a90cf1 100644 --- a/api/pages/code/changes.py +++ b/api/pages/code/changes.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Show insertions/deletions as a timeseries -# +# ######################################################################## @@ -71,33 +71,33 @@ import time def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + which = 'committer_email' role = 'committer' if indata.get('author', False): which = 'author_email' role = 'author' - + interval = indata.get('interval', 'day') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -130,7 +130,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Path filter? 
if indata.get('pathfilter'): pf = indata.get('pathfilter') @@ -140,7 +140,7 @@ def run(API, environ, indata, session): query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) else: query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) - + # Get timeseries for this period query['aggs'] = { 'per_interval': { @@ -162,7 +162,7 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="code_commit", @@ -180,7 +180,7 @@ def run(API, environ, indata, session): 'insertions': icount, 'deletions': dcount }) - + JSON_OUT = { 'timeseries': timeseries, 'interval': interval, diff --git a/api/pages/code/commits.py b/api/pages/code/commits.py index 2899f756..54bb4763 100644 --- a/api/pages/code/commits.py +++ b/api/pages/code/commits.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Show commits as a timeseries -# +# ######################################################################## @@ -72,33 +72,33 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + which = 'committer_email' role = 'committer' if indata.get('author', False): which = 'author_email' role = 'author' - + interval = indata.get('interval', 'day') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -131,7 +131,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Path filter? 
if indata.get('pathfilter'): pf = indata.get('pathfilter') @@ -141,14 +141,14 @@ def run(API, environ, indata, session): query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) else: query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) - + # Get number of committers, this period query['aggs'] = { 'commits': { 'date_histogram': { 'field': 'date', 'interval': interval - } + } } } res = session.DB.ES.search( @@ -157,7 +157,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + timeseries = [] for bucket in res['aggregations']['commits']['buckets']: ts = int(bucket['key'] / 1000) @@ -166,7 +166,7 @@ def run(API, environ, indata, session): 'date': ts, 'commits': count }) - + JSON_OUT = { 'widgetType': { 'chartType': 'bar' # Recommendation for the UI diff --git a/api/pages/code/committers.py b/api/pages/code/committers.py index 7b6d5183..a1370984 100644 --- a/api/pages/code/committers.py +++ b/api/pages/code/committers.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows trend data for a set of repos over a given period of time -# +# ######################################################################## @@ -72,33 +72,33 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + which = 'committer_email' role = 'committer' if indata.get('author', False): which = 'author_email' role = 'author' - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -131,7 +131,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Path filter? 
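Nearly every endpoint touched by this patch defaults its reporting window to dateTo - (86400*30*6), six 30-day months ending now, and the trend endpoints mirror that window backwards into an equally long dateYonder span for the before/after comparison. The arithmetic, for reference:

import time

dateTo = int(time.time())
dateFrom = dateTo - (86400 * 30 * 6)         # six 30-day "months": 180 days
dateYonder = dateFrom - (dateTo - dateFrom)  # an equally long span before that

assert dateTo - dateFrom == 15552000         # 180 days in seconds
assert dateFrom - dateYonder == dateTo - dateFrom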
if indata.get('pathfilter'): pf = indata.get('pathfilter') @@ -141,7 +141,7 @@ def run(API, environ, indata, session): query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) else: query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) - + # Get top 25 committers this period query['aggs'] = { 'committers': { @@ -176,7 +176,7 @@ def run(API, environ, indata, session): }, } }, - + } res = session.DB.ES.search( index=session.DB.dbname, @@ -205,12 +205,12 @@ def run(API, environ, indata, session): 'insertions': int(bucket['byinsertions']['buckets'][0]['stats']['value']), 'deletions': int(bucket['bydeletions']['buckets'][0]['stats']['value']) } - + topN = [] for email, person in people.items(): topN.append(person) topN = sorted(topN, key = lambda x: x['count'], reverse = True) - + # Get timeseries for this period query['aggs'] = { 'per_interval': { @@ -232,7 +232,7 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="code_commit", @@ -250,7 +250,7 @@ def run(API, environ, indata, session): 'committers': ccount, 'authors': acount }) - + JSON_OUT = { 'topN': { 'denoter': 'commits', diff --git a/api/pages/code/evolution.py b/api/pages/code/evolution.py index 593bd47c..8bc159bc 100644 --- a/api/pages/code/evolution.py +++ b/api/pages/code/evolution.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Show code evolution as a timeseries -# +# ######################################################################## @@ -72,25 +72,25 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + breakdown = False onlycode = False - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -120,7 +120,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # We need scrolling here! 
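code/evolution.py, whose hunk follows, has to visit every evolution document rather than aggregate, so it pages through results with the scroll API. The initial search's scroll parameter and page size fall outside the hunk, so they are assumed here; the loop's shape, reduced to essentials:

def scroll_all(es, dbname, query, handle):
    # es: an elasticsearch.Elasticsearch client; handle: per-document callback
    res = es.search(index=dbname, body=query, scroll='1m', size=100)
    sid = res['_scroll_id']
    scroll_size = res['hits']['total']
    if isinstance(scroll_size, dict):  # ES >= 7.x wraps the total in an object
        scroll_size = scroll_size['value']
    while scroll_size > 0:
        for doc in res['hits']['hits']:
            handle(doc['_source'])
        res = es.scroll(scroll_id=sid, scroll='1m')
        sid = res['_scroll_id']
        scroll_size = len(res['hits']['hits'])  # reaches zero once the scroll is drained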
res = session.DB.ES.search( index=session.DB.dbname, @@ -133,10 +133,10 @@ def run(API, environ, indata, session): scroll_size = res['hits']['total'] if type(scroll_size) is dict: scroll_size = scroll_size['value'] # ES >= 7.x - + timeseries = [] tstmp = {} - + while (scroll_size > 0): for doc in res['hits']['hits']: updates = doc['_source'] @@ -151,15 +151,15 @@ def run(API, environ, indata, session): item['code'] = item.get('code', 0) + (updates['loc'] or 0) item['comments'] = item.get('comments', 0) + (updates['comments'] or 0) item['blanks'] = item.get('blanks', 0) + (updates['blank'] or 0) - + res = session.DB.ES.scroll(scroll_id = sid, scroll = '1m') sid = res['_scroll_id'] scroll_size = len(res['hits']['hits']) - + for k, v in tstmp.items(): v['date'] = k timeseries.append(v) - + timeseries = sorted(timeseries, key = lambda x: x['date']) JSON_OUT = { 'widgetType': { diff --git a/api/pages/code/pony-timeseries.py b/api/pages/code/pony-timeseries.py index a7c1c8ec..5427b440 100644 --- a/api/pages/code/pony-timeseries.py +++ b/api/pages/code/pony-timeseries.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows timeseries of Pony Factor over time -# +# ######################################################################## @@ -74,31 +74,31 @@ import dateutil.relativedelta def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + hl = indata.get('span', 24) tnow = datetime.date.today() nm = tnow.month - (tnow.month % 3) ny = tnow.year ts = [] - + if nm < 1: nm += 12 ny = ny - 1 - + while ny > 1970: d = datetime.date(ny, nm, 1) t = time.mktime(d.timetuple()) @@ -108,8 +108,8 @@ def run(API, environ, indata, session): if nm < 1: nm += 12 ny = ny - 1 - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -139,31 +139,31 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get an initial count of commits res = session.DB.ES.count( index=session.DB.dbname, doc_type="code_commit", body = query ) - + globcount = res['count'] if globcount == 0: break - + # Get top 25 committers this period query['aggs'] = { 'by_committer': { 'terms': { 'field': 'committer_email', 'size': 1000 - } + } }, 'by_author': { 'terms': { 'field': 'author_email', 'size': 1000 - } + } } } res = session.DB.ES.search( @@ -172,8 +172,8 @@ def run(API, environ, indata, session): size = 0, body = query ) - - + + # PF for committers pf_committer = 0 pf_committer_count = 0 @@ -183,7 +183,7 @@ def run(API, environ, indata, session): pf_committer_count += count if pf_committer_count > int(globcount/2): break - + # PF for authors pf_author = 0 pf_author_count = 0 @@ -203,9 +203,9 @@ def run(API, environ, indata, session): 'Pony Factor (authorship)': pf_author, 'Meta-Pony Factor': len(cpf) }) - + ts = sorted(ts, key = lambda x: x['date']) - + 
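The Pony Factor computed above is the smallest number of people whose combined commits exceed half of all commits in the window; pony-timeseries.py recomputes it per quarter, and the meta-pony variant counts distinct mail domains instead of people. The core loop as a sketch (ES terms buckets arrive sorted by doc_count descending, which the early break relies on; the counter increment itself is elided from the hunk):

def pony_factor(buckets, total_commits):
    # buckets: ES terms buckets, largest doc_count first
    factor = 0
    covered = 0
    for bucket in buckets:
        factor += 1
        covered += bucket['doc_count']
        if covered > int(total_commits / 2):
            break
    return factor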
JSON_OUT = { 'text': "This shows Pony Factors as calculated over a %u month timespan. Authorship measures the people writing the bulk of the codebase, committership mesaures the people committing (merging) the code, and meta-pony is an estimation of how many organisations/companies are involved." % hl, 'timeseries': ts, diff --git a/api/pages/code/pony.py b/api/pages/code/pony.py index 3eb074fe..ee64c8b5 100644 --- a/api/pages/code/pony.py +++ b/api/pages/code/pony.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows pony factor data for a set of repos over a given period of time -# +# ######################################################################## @@ -72,28 +72,28 @@ import re def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*24)) # Default to a 24 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -123,29 +123,29 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get an initial count of commits res = session.DB.ES.count( index=session.DB.dbname, doc_type="code_commit", body = query ) - + globcount = res['count'] - + # Get top 25 committers this period query['aggs'] = { 'by_committer': { 'terms': { 'field': 'committer_email', 'size': 5000 - } + } }, 'by_author': { 'terms': { 'field': 'author_email', 'size': 5000 - } + } } } res = session.DB.ES.search( @@ -154,8 +154,8 @@ def run(API, environ, indata, session): size = 0, body = query ) - - + + # PF for committers pf_committer = 0 pf_committer_count = 0 @@ -165,7 +165,7 @@ def run(API, environ, indata, session): pf_committer_count += count if pf_committer_count > int(globcount/2): break - + # PF for authors pf_author = 0 pf_author_count = 0 @@ -178,8 +178,8 @@ def run(API, environ, indata, session): cpf[mldom] = True if pf_author_count > int(globcount/2): break - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -209,29 +209,29 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get an initial count of commits res = session.DB.ES.count( index=session.DB.dbname, doc_type="code_commit", body = query ) - + globcount = res['count'] - + # Get top 25 committers this period query['aggs'] = { 'by_committer': { 'terms': { 'field': 'committer_email', 'size': 5000 - } + } }, 'by_author': { 'terms': { 'field': 'author_email', 'size': 5000 - } + } } } 
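pony.py runs the identical aggregation twice, once over from..to and once over the yonder..from span, to produce the *_b "before" numbers the UI renders as trend arrows. For the meta-pony count it records each author's mail domain in the cpf dict; the derivation of mldom is elided from the hunks, so the regex below is an assumption:

import re

cpf = {}
for email in ('alice@apache.org', 'bob@example.com', 'carol@apache.org'):
    m = re.search(r'@([^@]+)$', email)  # assumed; the patch elides how mldom is derived
    if m:
        mldom = m.group(1)
        cpf[mldom] = True

meta_pony = len(cpf)  # 2 here: apache.org and example.com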
res = session.DB.ES.search( @@ -240,8 +240,8 @@ def run(API, environ, indata, session): size = 0, body = query ) - - + + # PF for committers pf_committer_b = 0 pf_committer_count = 0 @@ -251,7 +251,7 @@ def run(API, environ, indata, session): pf_committer_count += count if pf_committer_count > int(globcount/2): break - + # PF for authors pf_author_b = 0 pf_author_count = 0 @@ -264,7 +264,7 @@ def run(API, environ, indata, session): cpf_b[mldom] = True if pf_author_count > int(globcount/2): break - + JSON_OUT = { 'factors': [ { diff --git a/api/pages/code/punchcard.py b/api/pages/code/punchcard.py index ab0a52f8..a8bf26bd 100644 --- a/api/pages/code/punchcard.py +++ b/api/pages/code/punchcard.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Show commits as a timeseries -# +# ######################################################################## @@ -72,33 +72,33 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + which = 'committer_email' role = 'committer' if indata.get('author', False): which = 'author_email' role = 'author' - + interval = indata.get('interval', 'day') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -131,7 +131,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Path filter? if indata.get('pathfilter'): pf = indata.get('pathfilter') @@ -141,7 +141,7 @@ def run(API, environ, indata, session): query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) else: query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) - + # Get number of committers, this period query['aggs'] = { 'commits': { @@ -149,7 +149,7 @@ def run(API, environ, indata, session): 'field': 'date', 'interval': 'hour', "format": "E - k" - } + } } } res = session.DB.ES.search( @@ -158,7 +158,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + timeseries = {} for bucket in res['aggregations']['commits']['buckets']: ts = bucket['key_as_string'] diff --git a/api/pages/code/relationships.py b/api/pages/code/relationships.py index 843c1da6..1786b7e6 100644 --- a/api/pages/code/relationships.py +++ b/api/pages/code/relationships.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows a breakdown of contributor relationships between repositories -# +# ######################################################################## @@ -75,32 +75,32 @@ import math def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + which = 'committer_email' role = 'committer' if indata.get('author', False): which = 'author_email' role = 'author' - + interval = indata.get('interval', 'day') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -132,14 +132,14 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['must'].append({'term': {'committer_email' if not indata.get('author') else 'author_email': indata.get('email')}}) - + # Get number of commits, this period, per repo query['aggs'] = { 'per_repo': { 'terms': { 'field': 'sourceID', 'size': 10000 - } + } } } res = session.DB.ES.search( @@ -148,7 +148,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + repos = {} repo_commits = {} authorlinks = {} @@ -157,19 +157,19 @@ def run(API, environ, indata, session): max_shared = 0 max_authors = 0 minLinks = indata.get('links', 1) - + # For each repo, count commits and gather data on authors for doc in res['aggregations']['per_repo']['buckets']: sourceID = doc['key'] commits = doc['doc_count'] - + # Gather the unique authors/committers query['aggs'] = { 'per_contributor': { 'terms': { 'field': 'committer_email' if not indata.get('author') else 'author_email', 'size': 10000 - } + } } } xquery = copy.deepcopy(query) @@ -187,7 +187,7 @@ def run(API, environ, indata, session): max_commits = commits repos[sourceID] = authors repo_commits[sourceID] = commits - + # Now, figure out which repos share the same contributors repo_links = {} repo_notoriety = {} @@ -200,7 +200,7 @@ def run(API, environ, indata, session): if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID): continue repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID) - + for ID, repo in repos.items(): mylinks = {} if not ID in repodatas: @@ -237,11 +237,11 @@ def run(API, environ, indata, session): if ID not in repo_notoriety: repo_notoriety[ID] = set() repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to? - + if ID not in repo_authors: repo_authors[ID] = set() repo_authors[ID].update(repo) # How many projects is this repo connected to? - + if ID != oID: repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID] if repo_commits[ID] > max_commits: @@ -250,7 +250,7 @@ def run(API, environ, indata, session): max_links = len(repo_notoriety[ID]) if len(repo_authors[ID]) > max_authors: max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts - + # Now, pull it all together! 
nodes = [] links = [] @@ -273,7 +273,7 @@ def run(API, environ, indata, session): } nodes.append(doc) existing_repos.append(sourceID) - + for k, s in repo_links.items(): size = s fr, to = k.split('@') @@ -286,7 +286,7 @@ def run(API, environ, indata, session): 'tooltip': "%u committers in common" % size } links.append(doc) - + JSON_OUT = { 'maxLinks': max_links, 'maxShared': max_shared, diff --git a/api/pages/code/retention.py b/api/pages/code/retention.py index 6e108441..70a7bc7b 100644 --- a/api/pages/code/retention.py +++ b/api/pages/code/retention.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows retention metrics for a set of repos over a given period of time -# +# ######################################################################## @@ -73,37 +73,37 @@ import datetime def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + hl = indata.get('span', 12) # By default, we define a contributor as active if having committer in the past year tnow = datetime.date.today() nm = tnow.month - (tnow.month % 3) ny = tnow.year cy = ny ts = [] - + if nm < 1: nm += 12 ny = ny - 1 - + peopleSeen = {} activePeople = {} allPeople = {} FoundSomething = False - + ny = 1970 while ny < cy or (ny == cy and (nm+3) <= tnow.month): d = datetime.date(ny, nm, 1) @@ -116,7 +116,7 @@ def run(API, environ, indata, session): break d = datetime.date(ny, nm, 1) tf = time.mktime(d.timetuple()) - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -146,32 +146,32 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get an initial count of commits res = session.DB.ES.count( index=session.DB.dbname, doc_type="code_commit", body = query ) - + globcount = res['count'] if globcount == 0 and not FoundSomething: continue FoundSomething = True - + # Get top 1000 committers this period query['aggs'] = { 'by_committer': { 'terms': { 'field': 'committer_email', 'size': 25000 - } + } }, 'by_author': { 'terms': { 'field': 'author_email', 'size': 25000 - } + } } } res = session.DB.ES.search( @@ -180,12 +180,12 @@ def run(API, environ, indata, session): size = 0, body = query ) - - + + retained = 0 added = 0 lost = 0 - + thisPeriod = [] for bucket in res['aggregations']['by_author']['buckets']: who = bucket['key'] @@ -196,18 +196,18 @@ def run(API, environ, indata, session): activePeople[who] = tf if who not in allPeople: allPeople[who] = tf - + prune = [] for k, v in activePeople.items(): if v < (t - (hl*30.45*86400)): prune.append(k) lost += 1 - + for who in prune: del activePeople[who] del peopleSeen[who] retained = len(activePeople) - added - + ts.append({ 'date': tf, 'People who (re)joined': added, @@ -215,14 +215,14 @@ def run(API, environ, indata, session): 'People retained': retained, 'Active people': added + retained }) - + groups = [ 
['More than 5 years', (5*365*86400)+1], ['2 - 5 years', (2*365*86400)+1], ['1 - 2 years', (365*86400)], ['Less than a year', 1] ] - + counts = {} totExp = 0 for person, age in activePeople.items(): @@ -232,7 +232,7 @@ def run(API, environ, indata, session): counts[el[0]] = counts.get(el[0], 0) + 1 break avgyr = (totExp / (86400*365)) / max(len(activePeople),1) - + ts = sorted(ts, key = lambda x: x['date']) avgm = "" yr = int(avgyr) diff --git a/api/pages/code/sloc.py b/api/pages/code/sloc.py index 29b54c55..a6d7fc22 100644 --- a/api/pages/code/sloc.py +++ b/api/pages/code/sloc.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows a breakdown of lines of code for one or more sources -# +# ######################################################################## @@ -70,20 +70,20 @@ import json def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - - + + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + # Fetch all sources for default org dOrg = session.user['defaultOrganisation'] or "apache" query = { @@ -109,14 +109,14 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="source", size = 5000, body = query ) - + languages = {} years = 0 for hit in res['hits']['hits']: @@ -129,8 +129,8 @@ def run(API, environ, indata, session): languages[k]['code'] += v.get('code', 0) languages[k]['comment'] += v.get('comment', 0) languages[k]['blank'] += v.get('blank', 0) - - + + JSON_OUT = { 'languages': languages, 'okay': True, diff --git a/api/pages/code/top-commits.py b/api/pages/code/top-commits.py index ce75268e..d811082b 100644 --- a/api/pages/code/top-commits.py +++ b/api/pages/code/top-commits.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows top 25 repos by commit volume -# +# ######################################################################## @@ -72,24 +72,24 @@ import re def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -122,7 +122,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Path filter? if indata.get('pathfilter'): pf = indata.get('pathfilter') @@ -132,15 +132,15 @@ def run(API, environ, indata, session): query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) else: query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) - - + + # Get top 25 committers this period query['aggs'] = { 'by_repo': { 'terms': { 'field': 'sourceURL', 'size': 5000 - } + } } } res = session.DB.ES.search( @@ -149,14 +149,14 @@ def run(API, environ, indata, session): size = 0, body = query ) - + toprepos = [] for bucket in res['aggregations']['by_repo']['buckets']: repo = re.sub(r".+/([^/]+?)(?:\.git)?$", r"\1", bucket['key']) count = bucket['doc_count'] - + toprepos.append([repo, count]) - + toprepos = sorted(toprepos, key = lambda x: x[1], reverse = True) top = toprepos[0:24] if len(toprepos) > 25: @@ -164,11 +164,11 @@ def run(API, environ, indata, session): for repo in toprepos[25:]: count += repo[1] top.append(["Other repos", count]) - + tophash = {} for v in top: tophash[v[0]] = v[1] - + JSON_OUT = { 'counts': tophash, 'okay': True, diff --git a/api/pages/code/top-sloc.py b/api/pages/code/top-sloc.py index db0c8592..4cdf2762 100644 --- a/api/pages/code/top-sloc.py +++ b/api/pages/code/top-sloc.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows top 25 repos by lines of code -# +# ######################################################################## @@ -72,21 +72,21 @@ import re def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -113,14 +113,14 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="source", size = 5000, body = query ) - + toprepos = [] for doc in res['hits']['hits']: repo = doc['_source'] @@ -130,7 +130,7 @@ def run(API, environ, indata, session): if not count: count = 0 toprepos.append([url, count]) - + toprepos = sorted(toprepos, key = lambda x: int(x[1]), reverse = True) top = toprepos[0:24] if len(toprepos) > 25: @@ -138,11 +138,11 @@ def run(API, environ, indata, session): for repo in toprepos[25:]: count += repo[1] top.append(["Other repos", count]) - + tophash = {} for v in top: tophash[v[0]] = v[1] - + JSON_OUT = { 'counts': tophash, 'okay': True, diff --git a/api/pages/code/trends.py b/api/pages/code/trends.py index 69d9b130..d0cfc449 100644 --- a/api/pages/code/trends.py +++ b/api/pages/code/trends.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows trend data for a set of repos over a given period of time -# +# ######################################################################## @@ -71,29 +71,29 @@ import time def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - - + + + #################################################################### # We start by doing all the queries for THIS period. # # Then we reset the query, and change date to yonder-->from # @@ -129,7 +129,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Path filter? 
if indata.get('pathfilter'): pf = indata.get('pathfilter') @@ -139,7 +139,7 @@ def run(API, environ, indata, session): query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) else: query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) - + # Get number of commits, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -147,8 +147,8 @@ def run(API, environ, indata, session): body = query ) no_commits = res['count'] - - + + # Get number of committers, this period query['aggs'] = { 'commits': { @@ -161,7 +161,7 @@ def run(API, environ, indata, session): 'field': 'author_email' } } - + } res = session.DB.ES.search( index=session.DB.dbname, @@ -171,8 +171,8 @@ def run(API, environ, indata, session): ) no_committers = res['aggregations']['commits']['value'] no_authors = res['aggregations']['authors']['value'] - - + + # Get number of insertions, this period query['aggs'] = { 'changes': { @@ -188,7 +188,7 @@ def run(API, environ, indata, session): body = query ) insertions = res['aggregations']['changes']['value'] - + # Get number of deletions, this period query['aggs'] = { 'changes': { @@ -204,8 +204,8 @@ def run(API, environ, indata, session): body = query ) deletions = res['aggregations']['changes']['value'] - - + + #################################################################### # Change to PRIOR SPAN # #################################################################### @@ -236,7 +236,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Path filter? if indata.get('pathfilter'): pf = indata.get('pathfilter') @@ -246,8 +246,8 @@ def run(API, environ, indata, session): query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) else: query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) - - + + # Get number of commits, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -255,7 +255,7 @@ def run(API, environ, indata, session): body = query ) no_commits_before = res['count'] - + # Get number of committers, this period query['aggs'] = { 'commits': { @@ -277,7 +277,7 @@ def run(API, environ, indata, session): ) no_committers_before = res['aggregations']['commits']['value'] no_authors_before = res['aggregations']['authors']['value'] - + # Get number of insertions, this period query['aggs'] = { 'changes': { @@ -293,7 +293,7 @@ def run(API, environ, indata, session): body = query ) insertions_before = res['aggregations']['changes']['value'] - + # Get number of deletions, this period query['aggs'] = { 'changes': { @@ -309,9 +309,9 @@ def run(API, environ, indata, session): body = query ) deletions_before = res['aggregations']['changes']['value'] - - - + + + trends = { "committers": { 'before': no_committers_before, @@ -334,7 +334,7 @@ def run(API, environ, indata, session): 'title': "Lines changed this period" } } - + JSON_OUT = { 'trends': trends, 'okay': True, @@ -359,4 +359,3 @@ def run(API, environ, indata, session): title = "Lines changed" } """ - \ No newline at end of file diff --git a/api/pages/filters.py b/api/pages/filters.py index d2960517..a97112cb 100644 --- a/api/pages/filters.py +++ b/api/pages/filters.py @@ -24,11 +24,11 @@ import time def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + # Fetch all sources for default org dOrg = session.user['defaultOrganisation'] or "apache" res = session.DB.ES.search( @@ -56,7 +56,7 @@ def run(API, environ, indata, session): sources.append(xdoc) else: sources.append(doc) - + JSON_OUT = { 'views': sources, 'okay': True, diff --git a/api/pages/forum/actors.py b/api/pages/forum/actors.py index 345f59ae..40ad8ae2 100644 --- a/api/pages/forum/actors.py +++ b/api/pages/forum/actors.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows timeseries of no. of people opening topics or replying to them. -# +# ######################################################################## @@ -72,27 +72,27 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -124,7 +124,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}] - + # Get timeseries for this period query['aggs'] = { 'per_interval': { @@ -141,14 +141,14 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="forum_post", size = 0, body = query ) - + timeseries = {} for bucket in res['aggregations']['per_interval']['buckets']: @@ -159,8 +159,8 @@ def run(API, environ, indata, session): 'topic responders': ccount, 'topic creators': 0 } - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -192,7 +192,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['should'] = [{'term': {'creator': indata.get('email')}}] - + # Get timeseries for this period query['aggs'] = { 'per_interval': { @@ -209,7 +209,7 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="forum_topic", @@ -228,11 +228,11 @@ def run(API, environ, indata, session): 'topic creators': 0, 'topic responders': ccount } - + ts = [] for x, el in timeseries.items(): ts.append(el) - + JSON_OUT = { 'timeseries': ts, 'okay': True, diff --git a/api/pages/forum/creators.py b/api/pages/forum/creators.py index dc6a6c6a..574e4577 100644 --- a/api/pages/forum/creators.py +++ b/api/pages/forum/creators.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows the top N of forum topic creators -# +# ######################################################################## @@ -72,27 +72,27 @@ import hashlib def run(API, environ, 
indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') xtitle = None - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -125,7 +125,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}}) xtitle = "People opening issues solved by %s" % indata.get('email') - + # Get top 25 committers this period query['aggs'] = { 'committers': { @@ -134,9 +134,9 @@ def run(API, environ, indata, session): 'size': 25 }, 'aggs': { - + } - } + } } res = session.DB.ES.search( index=session.DB.dbname, @@ -161,7 +161,7 @@ def run(API, environ, indata, session): people[email] = person people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest() people[email]['count'] = count - + topN = [] for email, person in people.items(): topN.append(person) diff --git a/api/pages/forum/issues.py b/api/pages/forum/issues.py index a485bfac..fa27035b 100644 --- a/api/pages/forum/issues.py +++ b/api/pages/forum/issues.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows timeseries of forum topics opened/responded-to over time -# +# ######################################################################## @@ -81,40 +81,40 @@ def makeTS(dist): return ts def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - + # By default, we lump generic forums and question/answer (like SO, askbot) together as one distinct = { 'forum': ['discourse', 'stackoverflow', 'askbot'] } - + # If requested, we split them into two if indata.get('distinguish', False): distinct = { 'forum': ['discourse'], 'question bank': ['stackoverflow', 'askbot'] } - + timeseries = {} - + # For each category and the issue types that go along with that, # grab opened and closed over time. 
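The top-N people endpoints (forum/creators.py above, forum/responders.py and issue/closers.py below) attach a gravatar field, the hex MD5 of the contributor's email, which a UI can expand into an avatar URL. For reference (the URL pattern is Gravatar's published convention, not something this patch sets, and Gravatar specifies trimming and lower-casing the address first, which the code here does not do):

import hashlib

email = 'someone@example.com'
gravatar = hashlib.md5(email.encode('utf-8')).hexdigest()
avatar_url = 'https://www.gravatar.com/avatar/%s' % gravatar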
for iType, iValues in distinct.items(): @@ -155,14 +155,14 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}}) - + # Get number of opened ones, this period query['aggs'] = { 'commits': { 'date_histogram': { 'field': 'createdDate', 'interval': interval - } + } } } res = session.DB.ES.search( @@ -171,14 +171,14 @@ def run(API, environ, indata, session): size = 0, body = query ) - + for bucket in res['aggregations']['commits']['buckets']: ts = int(bucket['key'] / 1000) count = bucket['doc_count'] timeseries[ts] = timeseries.get(ts, makeTS(distinct)) timeseries[ts][iType + ' topics'] = timeseries[ts].get(iType + ' topics', 0) + count - - + + #################################################################### # ISSUES CLOSED # #################################################################### @@ -215,14 +215,14 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) if indata.get('email'): query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}}) - + # Get number of closed ones, this period query['aggs'] = { 'commits': { 'date_histogram': { 'field': 'createdDate', 'interval': interval - } + } } } res = session.DB.ES.search( @@ -231,19 +231,19 @@ def run(API, environ, indata, session): size = 0, body = query ) - + for bucket in res['aggregations']['commits']['buckets']: ts = int(bucket['key'] / 1000) count = bucket['doc_count'] timeseries[ts] = timeseries.get(ts, makeTS(distinct)) timeseries[ts][iType + ' replies'] = timeseries[ts].get(iType + ' replies', 0) + count - + ts = [] for k, v in timeseries.items(): v['date'] = k ts.append(v) - - + + JSON_OUT = { 'widgetType': { 'chartType': 'line', # Recommendation for the UI diff --git a/api/pages/forum/responders.py b/api/pages/forum/responders.py index 6c12ca2a..d379c1ef 100644 --- a/api/pages/forum/responders.py +++ b/api/pages/forum/responders.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows the top N of issue closers -# +# ######################################################################## @@ -72,28 +72,28 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') xtitle = None - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -126,7 +126,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}}) xTitle = "People closing %s's issues" % indata.get('email') - + # Get top 25 committers this period query['aggs'] = { 'committers': { @@ -135,9 +135,9 @@ def run(API, environ, indata, session): 'size': 25 }, 'aggs': { - + } - } + } } res = session.DB.ES.search( index=session.DB.dbname, @@ -162,7 +162,7 @@ def run(API, environ, indata, session): people[email] = person people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest() people[email]['count'] = count - + topN = [] for email, person in people.items(): topN.append(person) diff --git a/api/pages/forum/top-count.py b/api/pages/forum/top-count.py index 58d345c1..585407fd 100644 --- a/api/pages/forum/top-count.py +++ b/api/pages/forum/top-count.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows top 25 forums by interactions -# +# ######################################################################## @@ -72,24 +72,24 @@ import re def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -123,15 +123,15 @@ def run(API, environ, indata, session): query['query']['bool']['should'] = [ {'term': {'creator': indata.get('email')}} ] - - + + # Get top 25 committers this period query['aggs'] = { 'by_repo': { 'terms': { 'field': 'sourceID', 'size': 5000 - } + } } } res = session.DB.ES.search( @@ -140,7 +140,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + toprepos = [] for bucket in res['aggregations']['by_repo']['buckets']: ID = bucket['key'] @@ -149,7 +149,7 @@ def run(API, environ, indata, session): repo = re.sub(r".+/([^/]+)$", r"\1", it['sourceURL']) count = bucket['doc_count'] toprepos.append([repo, count]) - + toprepos = sorted(toprepos, key = lambda x: x[1], reverse = True) top = toprepos[0:24] if len(toprepos) > 25: @@ -157,11 +157,11 @@ def run(API, environ, indata, session): for repo in toprepos[25:]: count += repo[1] top.append(["Other forums", count]) - + tophash = {} for v in top: tophash[v[0]] = v[1] - + JSON_OUT = { 'counts': tophash, 'okay': True, diff --git a/api/pages/forum/top.py b/api/pages/forum/top.py index 51d4c8d6..3d184320 100644 --- a/api/pages/forum/top.py +++ b/api/pages/forum/top.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows the top N topics by interactions -# +# ######################################################################## @@ -72,27 +72,27 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -127,7 +127,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['should'] = [{'term': {'creator': indata.get('email')}}] - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="forum_topic", @@ -142,8 +142,8 @@ def run(API, environ, indata, session): doc['subject'] = doc.get('title') doc['count'] = doc.get('posts', 0) top.append(doc) - - + + JSON_OUT = { 'topN': { 'denoter': 'interactions', diff --git a/api/pages/forum/trends.py b/api/pages/forum/trends.py index 6012890e..208ccb0f 100644 --- a/api/pages/forum/trends.py +++ b/api/pages/forum/trends.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows trend data for a set of forums over a given period of time -# +# ######################################################################## @@ -71,30 +71,30 @@ import time def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - + + dOrg = session.user['defaultOrganisation'] or "apache" - + #################################################################### # We start by doing all the queries for THIS period. 
# # Then we reset the query, and change date to yonder-->from # @@ -126,7 +126,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get number of issues created, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -134,8 +134,8 @@ def run(API, environ, indata, session): body = query ) no_issues_created = res['count'] - - + + # Get number of open/close, this period query['aggs'] = { 'opener': { @@ -151,10 +151,10 @@ def run(API, environ, indata, session): body = query ) no_creators = res['aggregations']['opener']['value'] - - + + # REPLIERS - + query = { 'query': { 'bool': { @@ -182,7 +182,7 @@ def run(API, environ, indata, session): elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get number of issues created, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -190,8 +190,8 @@ def run(API, environ, indata, session): body = query ) no_issues_closed = res['count'] - - + + # Get number of open/close, this period query['aggs'] = { 'closer': { @@ -207,8 +207,8 @@ def run(API, environ, indata, session): body = query ) no_closers = res['aggregations']['closer']['value'] - - + + #################################################################### # Change to PRIOR SPAN # #################################################################### @@ -233,13 +233,13 @@ def run(API, environ, indata, session): } } } - + if indata.get('source'): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - - + + # Get number of issues, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -247,7 +247,7 @@ def run(API, environ, indata, session): body = query ) no_issues_created_before = res['count'] - + # Get number of committers, this period query['aggs'] = { 'opener': { @@ -263,11 +263,11 @@ def run(API, environ, indata, session): body = query ) no_creators_before = res['aggregations']['opener']['value'] - - - + + + # REPLIERS - + query = { 'query': { 'bool': { @@ -293,7 +293,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get number of issues created, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -301,8 +301,8 @@ def run(API, environ, indata, session): body = query ) no_issues_closed_before = res['count'] - - + + # Get number of open/close, this period query['aggs'] = { 'closer': { @@ -318,7 +318,7 @@ def run(API, environ, indata, session): body = query ) no_closers_before = res['aggregations']['closer']['value'] - + trends = { "created": { 'before': no_issues_created_before, @@ -341,7 +341,7 @@ def run(API, environ, indata, session): 'title': "People replying this period" } } - + JSON_OUT = { 'trends': trends, 'okay': True, diff --git a/api/pages/issue/actors.py b/api/pages/issue/actors.py index 37a5124a..b53e839a 100644 --- a/api/pages/issue/actors.py +++ b/api/pages/issue/actors.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows timeseries of no. 
of people opening/closing issues over time -# +# ######################################################################## @@ -72,27 +72,27 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -125,7 +125,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Get timeseries for this period query['aggs'] = { 'per_interval': { @@ -142,14 +142,14 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="issue", size = 0, body = query ) - + timeseries = {} for bucket in res['aggregations']['per_interval']['buckets']: ts = int(bucket['key'] / 1000) @@ -159,8 +159,8 @@ def run(API, environ, indata, session): 'closers': ccount, 'openers': 0 } - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -193,7 +193,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Get timeseries for this period query['aggs'] = { 'per_interval': { @@ -210,7 +210,7 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="issue", @@ -229,11 +229,11 @@ def run(API, environ, indata, session): 'closers': 0, 'openers': ccount } - + ts = [] for x, el in timeseries.items(): ts.append(el) - + JSON_OUT = { 'timeseries': ts, 'okay': True, diff --git a/api/pages/issue/age.py b/api/pages/issue/age.py index 4ab1f617..b6a3d901 100644 --- a/api/pages/issue/age.py +++ b/api/pages/issue/age.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows timeseries of no. of open tickets by age -# +# ######################################################################## @@ -72,23 +72,23 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + interval = indata.get('interval', 'month') - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -118,7 +118,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Get timeseries for this period query['aggs'] = { 'per_interval': { @@ -128,7 +128,7 @@ def run(API, environ, indata, session): } } } - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="issue", @@ -144,9 +144,9 @@ def run(API, environ, indata, session): 'date': ts, 'open': opened }) - - - + + + JSON_OUT = { 'timeseries': timeseries, 'okay': True, diff --git a/api/pages/issue/closers.py b/api/pages/issue/closers.py index 6515130e..53ada24a 100644 --- a/api/pages/issue/closers.py +++ b/api/pages/issue/closers.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows the top N of issue closers -# +# ######################################################################## @@ -72,28 +72,28 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') xtitle = None - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -126,7 +126,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['must'].append({'term': {'issueCreator': indata.get('email')}}) xTitle = "People closing %s's issues" % indata.get('email') - + # Get top 25 committers this period query['aggs'] = { 'committers': { @@ -135,9 +135,9 @@ def run(API, environ, indata, session): 'size': 25 }, 'aggs': { - + } - } + } } res = session.DB.ES.search( index=session.DB.dbname, @@ -162,7 +162,7 @@ def run(API, environ, indata, session): people[email] = person people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest() people[email]['count'] = count - + topN = [] for email, person in people.items(): topN.append(person) diff --git a/api/pages/issue/issues.py b/api/pages/issue/issues.py index 623eaa7e..dac17211 100644 --- a/api/pages/issue/issues.py +++ b/api/pages/issue/issues.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows timeseries of issues opened/closed over time -# +# ######################################################################## @@ -81,40 +81,40 @@ def makeTS(dist): return ts def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - + # By default, we lump PRs and issues into the same category distinct = { 'issues': ['issue', 'pullrequest'] } - + # If requested, we split them into two if indata.get('distinguish', False): distinct = { 'issues': ['issue'], 'pull requests': ['pullrequest'] } - + timeseries = {} - + # For each category and the issue types that go along with that, # grab opened and closed over time. 
for iType, iValues in distinct.items(): @@ -155,14 +155,14 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['must'].append({'term': {'issueCreator': indata.get('email')}}) - + # Get number of opened ones, this period query['aggs'] = { 'commits': { 'date_histogram': { 'field': 'createdDate', 'interval': interval - } + } } } res = session.DB.ES.search( @@ -171,14 +171,14 @@ def run(API, environ, indata, session): size = 0, body = query ) - + for bucket in res['aggregations']['commits']['buckets']: ts = int(bucket['key'] / 1000) count = bucket['doc_count'] timeseries[ts] = timeseries.get(ts, makeTS(distinct)) timeseries[ts][iType + ' opened'] = timeseries[ts].get(iType + ' opened', 0) + count - - + + #################################################################### # ISSUES CLOSED # #################################################################### @@ -215,14 +215,14 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) if indata.get('email'): query['query']['bool']['must'].append({'term': {'issueCloser': indata.get('email')}}) - + # Get number of closed ones, this period query['aggs'] = { 'commits': { 'date_histogram': { 'field': 'closedDate', 'interval': interval - } + } } } res = session.DB.ES.search( @@ -231,19 +231,19 @@ def run(API, environ, indata, session): size = 0, body = query ) - + for bucket in res['aggregations']['commits']['buckets']: ts = int(bucket['key'] / 1000) count = bucket['doc_count'] timeseries[ts] = timeseries.get(ts, makeTS(distinct)) timeseries[ts][iType + ' closed'] = timeseries[ts].get(iType + ' closed', 0) + count - + ts = [] for k, v in timeseries.items(): v['date'] = k ts.append(v) - - + + JSON_OUT = { 'widgetType': { 'chartType': 'line', # Recommendation for the UI diff --git a/api/pages/issue/openers.py b/api/pages/issue/openers.py index 321e5d0f..a2e081e5 100644 --- a/api/pages/issue/openers.py +++ b/api/pages/issue/openers.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows the top N of issue openers -# +# ######################################################################## @@ -72,27 +72,27 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
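# Aside: the loop below runs two date_histogram aggregations per category
# (first on createdDate, then on closedDate) and folds both into a single
# timeseries dict. A minimal, self-contained sketch of that merge pattern;
# the `es` client, index name and match_all query are assumptions standing
# in for the endpoint's real session and filters:

from elasticsearch import Elasticsearch

es = Elasticsearch(["localhost:9200"])   # assumed host
INDEX = "kibble"                         # assumed index name

def histogram_buckets(field, interval="month"):
    body = {
        "query": {"match_all": {}},
        "aggs": {"commits": {"date_histogram": {"field": field, "interval": interval}}},
    }
    res = es.search(index=INDEX, size=0, body=body)
    return res["aggregations"]["commits"]["buckets"]

timeseries = {}
for field, key in (("createdDate", "issues opened"), ("closedDate", "issues closed")):
    for bucket in histogram_buckets(field):
        ts = int(bucket["key"] / 1000)   # ES bucket keys are epoch millis
        entry = timeseries.setdefault(ts, {"issues opened": 0, "issues closed": 0})
        entry[key] += bucket["doc_count"]

# Flatten to the list-of-dicts shape the widget JSON expects
series = [dict(v, date=k) for k, v in sorted(timeseries.items())]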
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') xtitle = None - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -125,7 +125,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['must'].append({'term': {'issueCloser': indata.get('email')}}) xtitle = "People opening issues solved by %s" % indata.get('email') - + # Get top 25 committers this period query['aggs'] = { 'committers': { @@ -134,9 +134,9 @@ def run(API, environ, indata, session): 'size': 25 }, 'aggs': { - + } - } + } } res = session.DB.ES.search( index=session.DB.dbname, @@ -161,7 +161,7 @@ def run(API, environ, indata, session): people[email] = person people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest() people[email]['count'] = count - + topN = [] for email, person in people.items(): topN.append(person) diff --git a/api/pages/issue/pony-timeseries.py b/api/pages/issue/pony-timeseries.py index 2bf096d5..f8ae3608 100644 --- a/api/pages/issue/pony-timeseries.py +++ b/api/pages/issue/pony-timeseries.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows timeseries of Pony Factor over time -# +# ######################################################################## @@ -74,31 +74,31 @@ import dateutil.relativedelta def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + hl = indata.get('span', 24) tnow = datetime.date.today() nm = tnow.month - (tnow.month % 3) ny = tnow.year ts = [] - + if nm < 1: nm += 12 ny = ny - 1 - + while ny > 1970: d = datetime.date(ny, nm, 1) t = time.mktime(d.timetuple()) @@ -108,8 +108,8 @@ def run(API, environ, indata, session): if nm < 1: nm += 12 ny = ny - 1 - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -139,31 +139,31 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get an initial count of commits res = session.DB.ES.count( index=session.DB.dbname, doc_type="issue", body = query ) - + globcount = res['count'] if globcount == 0: break - + # Get top 25 committers this period query['aggs'] = { 'by_creator': { 'terms': { 'field': 'issueCreator', 'size': 1000 - } + } }, 'by_closer': { 'terms': { 'field': 'issueCloser', 'size': 1000 - } + } } } res = session.DB.ES.search( @@ -172,9 +172,9 @@ def run(API, environ, indata, session): size = 0, body = query ) - + cpf = {} - + # PF for openers pf_opener = 0 pf_opener_count = 0 @@ -187,7 +187,7 @@ def run(API, environ, indata, session): cpf[mldom] = True if pf_opener_count > int(globcount/2): break - + # PF for closer pf_closer = 0 pf_closer_count = 0 @@ -206,9 +206,9 @@ def run(API, environ, indata, session): 'Pony Factor (closers)': pf_closer, 'Meta-Pony Factor': len(cpf) }) - + ts = sorted(ts, key = lambda x: x['date']) - + JSON_OUT = { 'text': "This shows Pony Factors as calculated over a %u month timespan. Openers measures the people submitting the bulk of the issues, closers mesaures the people closing (resolving) the issues, and meta-pony is an estimation of how many organisations/companies are involved." % hl, 'timeseries': ts, diff --git a/api/pages/issue/relationships.py b/api/pages/issue/relationships.py index f660832f..ff0c9d6a 100644 --- a/api/pages/issue/relationships.py +++ b/api/pages/issue/relationships.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows a breakdown of contributor relationships between issue trackers -# +# ######################################################################## @@ -75,32 +75,32 @@ import math def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + which = 'committer_email' role = 'committer' if indata.get('author', False): which = 'author_email' role = 'author' - + interval = indata.get('interval', 'day') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -133,14 +133,14 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Get number of commits, this period, per repo query['aggs'] = { 'per_repo': { 'terms': { 'field': 'sourceID', 'size': 10000 - } + } } } res = session.DB.ES.search( @@ -149,7 +149,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + repos = {} repo_commits = {} authorlinks = {} @@ -157,25 +157,25 @@ def run(API, environ, indata, session): max_links = 0 max_shared = 0 max_authors = 0 - + # For each repo, count commits and gather data on authors for doc in res['aggregations']['per_repo']['buckets']: sourceID = doc['key'] commits = doc['doc_count'] - + # Gather the unique authors/committers query['aggs'] = { 'per_closer': { 'terms': { 'field': 'issueCloser', 'size': 10000 - } + } }, 'per_creator': { 'terms': { 'field': 'issueCreator', 'size': 10000 - } + } } } xquery = copy.deepcopy(query) @@ -195,7 +195,7 @@ def run(API, environ, indata, session): max_commits = commits repos[sourceID] = authors repo_commits[sourceID] = commits - + # Now, figure out which repos share the same contributors repo_links = {} repo_notoriety = {} @@ -209,7 +209,7 @@ def run(API, environ, indata, session): if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID): continue repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID) - + for ID, repo in repos.items(): mylinks = {} if not ID in repodatas: @@ -246,11 +246,11 @@ def run(API, environ, indata, session): if ID not in repo_notoriety: repo_notoriety[ID] = set() repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to? - + if ID not in repo_authors: repo_authors[ID] = set() repo_authors[ID].update(repo) # How many projects is this repo connected to? - + if ID != oID: repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID] if repo_commits[ID] > max_commits: @@ -259,7 +259,7 @@ def run(API, environ, indata, session): max_links = len(repo_notoriety[ID]) if len(repo_authors[ID]) > max_authors: max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts - + # Now, pull it all together! 
nodes = [] links = [] @@ -282,7 +282,7 @@ def run(API, environ, indata, session): } nodes.append(doc) existing_repos.append(sourceID) - + for k, s in repo_links.items(): size = s fr, to = k.split('@') @@ -295,7 +295,7 @@ def run(API, environ, indata, session): 'tooltip': "%u contributors in common" % size } links.append(doc) - + JSON_OUT = { 'maxLinks': max_links, 'maxShared': max_shared, diff --git a/api/pages/issue/retention.py b/api/pages/issue/retention.py index 22e021e3..3d745418 100644 --- a/api/pages/issue/retention.py +++ b/api/pages/issue/retention.py @@ -58,7 +58,7 @@ # - cookieAuth: [] # summary: Shows retention metrics for a set of issue trackers over a given period # of time -# +# ######################################################################## @@ -75,37 +75,37 @@ import datetime def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + hl = indata.get('span', 12) # By default, we define a contributor as active if having committer in the past year tnow = datetime.date.today() nm = tnow.month - (tnow.month % 3) ny = tnow.year cy = ny ts = [] - + if nm < 1: nm += 12 ny = ny - 1 - + peopleSeen = {} activePeople = {} allPeople = {} FoundSomething = False - + ny = 1970 while ny < cy or (ny == cy and (nm+3) <= tnow.month): d = datetime.date(ny, nm, 1) @@ -118,7 +118,7 @@ def run(API, environ, indata, session): break d = datetime.date(ny, nm, 1) tf = time.mktime(d.timetuple()) - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -148,32 +148,32 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get an initial count of commits res = session.DB.ES.count( index=session.DB.dbname, doc_type="issue", body = query ) - + globcount = res['count'] if globcount == 0 and FoundSomething == False: continue FoundSomething = True - + # Get top 1000 committers this period query['aggs'] = { 'by_o': { 'terms': { 'field': 'issueCloser', 'size': 50000 - } + } }, 'by_c': { 'terms': { 'field': 'issueCreator', 'size': 50000 - } + } } } res = session.DB.ES.search( @@ -182,12 +182,12 @@ def run(API, environ, indata, session): size = 0, body = query ) - - + + retained = 0 added = 0 lost = 0 - + thisPeriod = [] for bucket in res['aggregations']['by_o']['buckets']: who = bucket['key'] @@ -198,7 +198,7 @@ def run(API, environ, indata, session): activePeople[who] = tf if who not in allPeople: allPeople[who] = tf - + for bucket in res['aggregations']['by_c']['buckets']: who = bucket['key'] thisPeriod.append(who) @@ -209,13 +209,13 @@ def run(API, environ, indata, session): activePeople[who] = tf if who not in allPeople: allPeople[who] = tf - + prune = [] for k, v in activePeople.items(): if v < (t - (hl*30.45*86400)): prune.append(k) lost += 1 - + for who in prune: del activePeople[who] del peopleSeen[who] @@ -227,14 +227,14 @@ def 
run(API, environ, indata, session): 'People retained': retained, 'Active people': added + retained }) - + groups = [ ['More than 5 years', (5*365*86400)+1], ['2 - 5 years', (2*365*86400)+1], ['1 - 2 years', (365*86400)], ['Less than a year', 1] ] - + counts = {} totExp = 0 for person, age in activePeople.items(): @@ -244,9 +244,9 @@ def run(API, environ, indata, session): counts[el[0]] = counts.get(el[0], 0) + 1 break avgyr = (totExp / (86400*365)) / max(len(activePeople),1) - + ts = sorted(ts, key = lambda x: x['date']) - + avgm = "" yr = int(avgyr) ym = round((avgyr-yr)*12) diff --git a/api/pages/issue/top-count.py b/api/pages/issue/top-count.py index f17f721b..ebb121c8 100644 --- a/api/pages/issue/top-count.py +++ b/api/pages/issue/top-count.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows top 25 issue trackers by issues -# +# ######################################################################## @@ -72,24 +72,24 @@ import re def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -125,15 +125,15 @@ def run(API, environ, indata, session): {'term': {'issueCloser': indata.get('email')}} ] query['query']['bool']['minimum_should_match'] = 1 - - + + # Get top 25 committers this period query['aggs'] = { 'by_repo': { 'terms': { 'field': 'sourceID', 'size': 5000 - } + } } } res = session.DB.ES.search( @@ -142,7 +142,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + toprepos = [] for bucket in res['aggregations']['by_repo']['buckets']: ID = bucket['key'] @@ -151,7 +151,7 @@ def run(API, environ, indata, session): repo = re.sub(r".+/([^/]+)$", r"\1", it['sourceURL']) count = bucket['doc_count'] toprepos.append([repo, count]) - + toprepos = sorted(toprepos, key = lambda x: x[1], reverse = True) top = toprepos[0:24] if len(toprepos) > 25: @@ -159,11 +159,11 @@ def run(API, environ, indata, session): for repo in toprepos[25:]: count += repo[1] top.append(["Other trackers", count]) - + tophash = {} for v in top: tophash[v[0]] = v[1] - + JSON_OUT = { 'counts': tophash, 'okay': True, diff --git a/api/pages/issue/top.py b/api/pages/issue/top.py index 33cde721..42b4d38e 100644 --- a/api/pages/issue/top.py +++ b/api/pages/issue/top.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows the top N issues by interactions -# +# ######################################################################## @@ -72,27 +72,27 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -128,7 +128,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="issue", @@ -143,8 +143,8 @@ def run(API, environ, indata, session): doc['subject'] = doc.get('title') doc['count'] = doc.get('comments', 0) top.append(doc) - - + + JSON_OUT = { 'topN': { 'denoter': 'interactions', diff --git a/api/pages/issue/trends.py b/api/pages/issue/trends.py index 7387d88f..c6b45152 100644 --- a/api/pages/issue/trends.py +++ b/api/pages/issue/trends.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows trend data for a set of issue trackers over a given period of time -# +# ######################################################################## @@ -71,30 +71,30 @@ import time def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - + + dOrg = session.user['defaultOrganisation'] or "apache" - + #################################################################### # We start by doing all the queries for THIS period. 
# # Then we reset the query, and change date to yonder-->from # @@ -129,7 +129,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Get number of issues created, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -137,8 +137,8 @@ def run(API, environ, indata, session): body = query ) no_issues_created = res['count'] - - + + # Get number of open/close, this period query['aggs'] = { 'opener': { @@ -154,10 +154,10 @@ def run(API, environ, indata, session): body = query ) no_creators = res['aggregations']['opener']['value'] - - + + # CLOSERS - + query = { 'query': { 'bool': { @@ -187,7 +187,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Get number of issues created, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -195,8 +195,8 @@ def run(API, environ, indata, session): body = query ) no_issues_closed = res['count'] - - + + # Get number of open/close, this period query['aggs'] = { 'closer': { @@ -212,9 +212,9 @@ def run(API, environ, indata, session): body = query ) no_closers = res['aggregations']['closer']['value'] - - - + + + #################################################################### # Change to PRIOR SPAN # #################################################################### @@ -244,7 +244,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Get number of issues, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -252,7 +252,7 @@ def run(API, environ, indata, session): body = query ) no_issues_created_before = res['count'] - + # Get number of committers, this period query['aggs'] = { 'opener': { @@ -268,11 +268,11 @@ def run(API, environ, indata, session): body = query ) no_creators_before = res['aggregations']['opener']['value'] - - - + + + # CLOSERS - + query = { 'query': { 'bool': { @@ -299,7 +299,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Get number of issues created, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -307,8 +307,8 @@ def run(API, environ, indata, session): body = query ) no_issues_closed_before = res['count'] - - + + # Get number of open/close, this period query['aggs'] = { 'closer': { @@ -324,8 +324,8 @@ def run(API, environ, indata, session): body = query ) no_closers_before = res['aggregations']['closer']['value'] - - + + trends = { "created": { 'before': no_issues_created_before, @@ -348,7 +348,7 @@ def run(API, environ, indata, session): 'title': "People closing issues this period" } } - + JSON_OUT = { 'trends': trends, 'okay': True, diff --git a/api/pages/mail/map.py b/api/pages/mail/map.py index 3c446cc8..b0f8398c 100644 --- a/api/pages/mail/map.py +++ b/api/pages/mail/map.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows a breakdown of email 
author reply mappings -# +# ######################################################################## @@ -77,24 +77,24 @@ badBots = r"(JIRA|Hudson|jira|jenkins|GitHub|git@|dev@|bugzilla|gerrit)" def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span span = dateTo - dateFrom - + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -126,21 +126,21 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('search'): query['query']['bool']['must'].append({'regexp': {'subject': indata.get('search')}}) - + if indata.get('email'): query['query']['bool']['minimum_should_match'] = 1 query['query']['bool']['should'] = [ {'term': {'replyto.keyword': indata.get('email')}}, {'term': {'sender': indata.get('email')}}, ] - + # Get number of commits, this period, per repo query['aggs'] = { 'per_ml': { 'terms': { 'field': 'replyto.keyword' if not indata.get('author') else 'sender', 'size': 150 - } + } } } res = session.DB.ES.search( @@ -149,7 +149,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + repos = {} repo_commits = {} authorlinks = {} @@ -158,11 +158,11 @@ def run(API, environ, indata, session): max_shared = 0 max_authors = 0 minLinks = indata.get('links', 1) - + if indata.get('email'): del query['query']['bool']['should'] del query['query']['bool']['minimum_should_match'] - + # For each repo, count commits and gather data on authors for doc in res['aggregations']['per_ml']['buckets']: sourceID = doc['key'] @@ -171,19 +171,19 @@ def run(API, environ, indata, session): continue if emails > (span/86400)*4: # More than 4/day and we consider you a bot! 
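# (Aside: span = dateTo - dateFrom is in seconds, so span/86400 is the
#  window length in days; a sender averaging more than four mails a day
#  across the whole window is treated as a bot and skipped here.)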
continue - - + + # Gather the unique authors/committers query['aggs'] = { 'per_ml': { 'terms': { 'field': 'sender' if not indata.get('author') else 'replyto.keyword', 'size': 5000 - } + } } } xquery = copy.deepcopy(query) - + xquery['query']['bool']['must'].append({'term': {'replyto.keyword' if not indata.get('author') else 'sender': sourceID}}) xres = session.DB.ES.search( index=session.DB.dbname, @@ -199,7 +199,7 @@ def run(API, environ, indata, session): max_emails = emails repos[sourceID] = authors repo_commits[sourceID] = emails - + # Now, figure out which repos share the same contributors repo_links = {} repo_notoriety = {} @@ -213,7 +213,7 @@ def run(API, environ, indata, session): if not session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id = hID): continue repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id = hID) - + for ID, repo in repos.items(): mylinks = {} if not ID in repodatas: @@ -233,7 +233,7 @@ def run(API, environ, indata, session): if m: xID = m.group(1) if xID != ID: - + if ID in xrepo: xlinks.append(xID) lname = "%s||%s" % (ID, xID) # Link name @@ -248,11 +248,11 @@ def run(API, environ, indata, session): if ID not in repo_notoriety: repo_notoriety[ID] = set() repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to? - + if ID not in repo_authors: repo_authors[ID] = set() repo_authors[ID].update(repo) # How many projects is this repo connected to? - + if ID != oID: repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID] if repo_commits[ID] > max_emails: @@ -261,7 +261,7 @@ def run(API, environ, indata, session): max_links = len(repo_notoriety[ID]) if len(repo_authors[ID]) > max_authors: max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts - + # Now, pull it all together! nodes = [] links = [] @@ -285,7 +285,7 @@ def run(API, environ, indata, session): } nodes.append(doc) existing_repos.append(sourceID) - + for k, s in repo_links.items(): size = s fr, to = k.split('||') @@ -298,7 +298,7 @@ def run(API, environ, indata, session): 'tooltip': "%u topics exchanged" % size } links.append(doc) - + JSON_OUT = { 'maxLinks': max_links, 'maxShared': max_shared, diff --git a/api/pages/mail/mood-timeseries.py b/api/pages/mail/mood-timeseries.py index be7e3b28..21f26d03 100644 --- a/api/pages/mail/mood-timeseries.py +++ b/api/pages/mail/mood-timeseries.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows a breakdown of the (analyzed) mood in emails as a timeseries -# +# ######################################################################## @@ -71,29 +71,29 @@ import time def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - - + + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span interval = indata.get('interval', 'week') - + # Define moods we know of moods_good = set(['trust', 'joy', 'confident', 'positive']) moods_bad = set(['sadness', 'anger', 'disgust', 'fear', 'negative']) moods_neutral = set(['anticipation', 'surprise', 'tentative', 'analytical', 'neutral']) all_moods = set(moods_good | moods_bad | moods_neutral) - + # Fetch all sources for default org dOrg = session.user['defaultOrganisation'] or "apache" query = { @@ -126,13 +126,13 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + emls = session.DB.ES.count( index=session.DB.dbname, doc_type="email", body = query )['count'] - + query['aggs'] = { 'history': { 'date_histogram': { @@ -143,26 +143,26 @@ def run(API, environ, indata, session): } } } - + # Add aggregations for moods for mood in all_moods: query['aggs']['history']['aggs'][mood] = { 'sum': { 'field': "mood.%s" % mood - } + } } - - + + res = session.DB.ES.search( index=session.DB.dbname, doc_type="email", size = 0, body = query ) - + timeseries = [] - - + + for tz in res['aggregations']['history']['buckets']: moods = {} emls = tz['doc_count'] @@ -170,7 +170,7 @@ def run(API, environ, indata, session): moods[mood] = int (100 * tz.get(mood, {'value':0})['value'] / max(1, emls)) moods['date'] = int(tz['key']/1000) timeseries.append(moods) - + JSON_OUT = { 'timeseries': timeseries, 'okay': True diff --git a/api/pages/mail/mood.py b/api/pages/mail/mood.py index a1beb46b..38dd57fc 100644 --- a/api/pages/mail/mood.py +++ b/api/pages/mail/mood.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows a breakdown of the (analyzed) mood in emails -# +# ######################################################################## @@ -71,28 +71,28 @@ import time def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - - + + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + # Define moods we know of moods_good = set(['trust', 'joy', 'confident', 'positive']) moods_bad = set(['sadness', 'anger', 'disgust', 'fear', 'negative']) moods_neutral = set(['anticipation', 'surprise', 'tentative', 'analytical', 'neutral']) all_moods = set(moods_good | moods_bad | moods_neutral) - + # Start off with a query for the entire org (we want to compare) dOrg = session.user['defaultOrganisation'] or "apache" query = { @@ -120,26 +120,26 @@ def run(API, environ, indata, session): } } } - + # Count all emails, for averaging scores gemls = session.DB.ES.count( index=session.DB.dbname, doc_type="email", body = query )['count'] - + # Add aggregations for moods query['aggs'] = { - + } for mood in all_moods: query['aggs'][mood] = { 'sum': { 'field': "mood.%s" % mood - } + } } - - + + global_mood_compiled = {} mood_compiled = {} txt = "This chart shows the ten potential mood types as they average on the emails in this period. A score of 100 means a sentiment is highly visible in most emails." @@ -150,7 +150,7 @@ def run(API, environ, indata, session): txt = "This chart shows the ten potential mood types on the selected lists as they compare against all mailing lists in the database. A score of 100 here means the sentiment conforms to averages across all lists." gtxt = "This shows the overall estimated mood compared to all lists, as a gauge from terrible to good." global_moods = {} - + gres = session.DB.ES.search( index=session.DB.dbname, doc_type="email", @@ -165,7 +165,7 @@ def run(API, environ, indata, session): for k, v in global_moods.items(): if v >= 0: global_mood_compiled[k] = int( (v / max(1,gemls)) * 100) - + # Now, if we have a view (or not distinguishing), ... 
ss = False if indata.get('source'): @@ -174,7 +174,7 @@ def run(API, environ, indata, session): elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) ss = True - + # If we have a view enabled (and distinguish), compile local view against global view # Else, just copy global as local if ss or not indata.get('relative'): @@ -184,17 +184,17 @@ def run(API, environ, indata, session): size = 0, body = query ) - + del query['aggs'] # we have to remove these to do a count() emls = session.DB.ES.count( index=session.DB.dbname, doc_type="email", body = query )['count'] - + moods = {} years = 0 - + for mood, el in res['aggregations'].items(): if el['value'] == 0: el['value'] == -1 @@ -204,13 +204,13 @@ def run(API, environ, indata, session): mood_compiled[k] = int(100 * int( ( v / max(1,emls)) * 100) / max(1, global_mood_compiled.get(k, 100))) else: mood_compiled = global_mood_compiled - + # If relative mode and a field is missing, assume 100 (norm) if indata.get('relative'): for M in all_moods: if mood_compiled.get(M, 0) == 0: mood_compiled[M] = 100 - + # Compile an overall happiness level MAX = max(max(mood_compiled.values()),1) X = 100 if indata.get('relative') else 0 @@ -218,9 +218,9 @@ def run(API, environ, indata, session): for B in moods_bad: if mood_compiled.get(B) and mood_compiled[B] > X: bads += mood_compiled[B] - + happ = 50 - + goods = X for B in moods_good: if mood_compiled.get(B) and mood_compiled[B] > X: @@ -231,7 +231,7 @@ def run(API, environ, indata, session): if goods > 0: happ += (50*goods/MAX) swingometer = max(0, min(100, happ)) - + # JSON out! JSON_OUT = { 'relativeMode': True, diff --git a/api/pages/mail/pony-timeseries.py b/api/pages/mail/pony-timeseries.py index fefd7762..37d160fb 100644 --- a/api/pages/mail/pony-timeseries.py +++ b/api/pages/mail/pony-timeseries.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows timeseries of Pony Factor over time -# +# ######################################################################## @@ -74,30 +74,30 @@ import dateutil.relativedelta def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + hl = indata.get('span', 24) tnow = datetime.date.today() nm = tnow.month - (tnow.month % 3) ny = tnow.year ts = [] - + if nm < 1: nm += 12 ny = ny - 1 - + while ny > 1970: d = datetime.date(ny, nm, 1) t = time.mktime(d.timetuple()) @@ -107,8 +107,8 @@ def run(API, environ, indata, session): if nm < 1: nm += 12 ny = ny - 1 - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -145,25 +145,25 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get an initial count of commits res = session.DB.ES.count( index=session.DB.dbname, doc_type="email", body = query ) - + globcount = res['count'] if globcount == 0: break - + # Get top 25 committers this period query['aggs'] = { 'by_sender': { 'terms': { 'field': 'sender', 'size': 2500 - } + } } } res = session.DB.ES.search( @@ -172,8 +172,8 @@ def run(API, environ, indata, session): size = 0, body = query ) - - + + # PF for authors pf_author = 0 pf_author_count = 0 @@ -196,9 +196,9 @@ def run(API, environ, indata, session): 'Pony Factor (authors)': pf_author, 'Meta-Pony Factor': len(cpf) }) - + ts = sorted(ts, key = lambda x: x['date']) - + JSON_OUT = { 'text': "This shows Pony Factors as calculated over a %u month timespan. Authorship is a measure of the people it takes to make up the bulk of email traffic, and meta-pony is an estimation of how many organisations/companies are involved." % hl, 'timeseries': ts, diff --git a/api/pages/mail/relationships.py b/api/pages/mail/relationships.py index 16a0cdb0..c10b6343 100644 --- a/api/pages/mail/relationships.py +++ b/api/pages/mail/relationships.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows a breakdown of contributor relationships between mailing lists -# +# ######################################################################## @@ -75,24 +75,24 @@ import math def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -124,14 +124,14 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['must'].append({'term': {'sender': indata.get('email')}}) - + # Get number of commits, this period, per repo query['aggs'] = { 'per_ml': { 'terms': { 'field': 'sourceID', 'size': 10000 - } + } } } res = session.DB.ES.search( @@ -140,7 +140,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + repos = {} repo_commits = {} authorlinks = {} @@ -149,19 +149,19 @@ def run(API, environ, indata, session): max_shared = 0 max_authors = 0 minLinks = indata.get('links', 1) - + # For each repo, count commits and gather data on authors for doc in res['aggregations']['per_ml']['buckets']: sourceID = doc['key'] emails = doc['doc_count'] - + # Gather the unique authors/committers query['aggs'] = { 'per_ml': { 'terms': { 'field': 'sender', 'size': 10000 - } + } } } xquery = copy.deepcopy(query) @@ -179,7 +179,7 @@ def run(API, environ, indata, session): max_emails = emails repos[sourceID] = authors repo_commits[sourceID] = emails - + # Now, figure out which repos share the same contributors repo_links = {} repo_notoriety = {} @@ -192,7 +192,7 @@ def run(API, environ, indata, session): if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID): continue repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID) - + for ID, repo in repos.items(): mylinks = {} if not ID in repodatas: @@ -229,11 +229,11 @@ def run(API, environ, indata, session): if ID not in repo_notoriety: repo_notoriety[ID] = set() repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to? - + if ID not in repo_authors: repo_authors[ID] = set() repo_authors[ID].update(repo) # How many projects is this repo connected to? - + if ID != oID: repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID] if repo_commits[ID] > max_emails: @@ -242,7 +242,7 @@ def run(API, environ, indata, session): max_links = len(repo_notoriety[ID]) if len(repo_authors[ID]) > max_authors: max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts - + # Now, pull it all together! 
nodes = [] links = [] @@ -265,7 +265,7 @@ def run(API, environ, indata, session): } nodes.append(doc) existing_repos.append(sourceID) - + for k, s in repo_links.items(): size = s fr, to = k.split('||') @@ -278,7 +278,7 @@ def run(API, environ, indata, session): 'tooltip': "%u contributors in common" % size } links.append(doc) - + JSON_OUT = { 'maxLinks': max_links, 'maxShared': max_shared, diff --git a/api/pages/mail/retention.py b/api/pages/mail/retention.py index 6734da11..bc93b9ce 100644 --- a/api/pages/mail/retention.py +++ b/api/pages/mail/retention.py @@ -58,7 +58,7 @@ # - cookieAuth: [] # summary: Shows retention metrics for a set of mailing lists over a given period # of time -# +# ######################################################################## @@ -75,36 +75,36 @@ import datetime def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + hl = indata.get('span', 12) # By default, we define a contributor as active if having committer in the past year tnow = datetime.date.today() nm = tnow.month - (tnow.month % 3) ny = tnow.year cy = ny ts = [] - + if nm < 1: nm += 12 ny = ny - 1 - + peopleSeen = {} activePeople = {} allPeople = {} - + ny = 1970 FoundSomething = False while ny < cy or (ny == cy and (nm+3) <= tnow.month): @@ -118,8 +118,8 @@ def run(API, environ, indata, session): break d = datetime.date(ny, nm, 1) tf = time.mktime(d.timetuple()) - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -149,14 +149,14 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get an initial count of commits res = session.DB.ES.count( index=session.DB.dbname, doc_type="email", body = query ) - + globcount = res['count'] if globcount == 0 and not FoundSomething: continue @@ -167,7 +167,7 @@ def run(API, environ, indata, session): 'terms': { 'field': 'sender', 'size': 200000 - } + } } } res = session.DB.ES.search( @@ -176,12 +176,12 @@ def run(API, environ, indata, session): size = 0, body = query ) - - + + retained = 0 added = 0 lost = 0 - + thisPeriod = [] for bucket in res['aggregations']['by_author']['buckets']: who = bucket['key'] @@ -192,18 +192,18 @@ def run(API, environ, indata, session): activePeople[who] = tf if who not in allPeople: allPeople[who] = tf - + prune = [] for k, v in activePeople.items(): if v < (t - (hl*30.45*86400)): prune.append(k) lost += 1 - + for who in prune: del activePeople[who] del peopleSeen[who] retained = len(activePeople) - added - + ts.append({ 'date': tf, 'People who (re)joined': added, @@ -211,14 +211,14 @@ def run(API, environ, indata, session): 'People retained': retained, 'Active people': added + retained }) - + groups = [ ['More than 5 years', (5*365*86400)+1], ['2 - 5 years', (2*365*86400)+1], ['1 - 2 years', (365*86400)], ['Less than a year', 1] ] - + counts = {} totExp = 
0 for person, age in activePeople.items(): @@ -228,9 +228,9 @@ def run(API, environ, indata, session): counts[el[0]] = counts.get(el[0], 0) + 1 break avgyr = (totExp / (86400*365)) / max(len(activePeople),1) - + ts = sorted(ts, key = lambda x: x['date']) - + avgm = "" yr = int(avgyr) ym = round((avgyr-yr)*12) diff --git a/api/pages/mail/timeseries-single.py b/api/pages/mail/timeseries-single.py index 855539cc..8b8b231c 100644 --- a/api/pages/mail/timeseries-single.py +++ b/api/pages/mail/timeseries-single.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows email sent over time -# +# ######################################################################## @@ -73,27 +73,27 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -126,7 +126,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'sender': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Get number of committers, this period query['aggs'] = { 'timeseries': { @@ -142,7 +142,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + timeseries = [] for bucket in res['aggregations']['timeseries']['buckets']: ts = int(bucket['key'] / 1000) @@ -150,7 +150,7 @@ def run(API, environ, indata, session): 'date': ts, 'emails': bucket['doc_count'] }) - + JSON_OUT = { 'widgetType': { 'chartType': 'bar' # Recommendation for the UI diff --git a/api/pages/mail/timeseries.py b/api/pages/mail/timeseries.py index 7b446f42..ab4e12a1 100644 --- a/api/pages/mail/timeseries.py +++ b/api/pages/mail/timeseries.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows email sent over time -# +# ######################################################################## @@ -72,33 +72,33 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + which = 'committer_email' role = 'committer' if indata.get('author', False): which = 'author_email' role = 'author' - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -131,7 +131,7 @@ def run(API, environ, indata, session): if indata.get('email'): query['query']['bool']['should'] = [{'term': {'sender': indata.get('email')}}] query['query']['bool']['minimum_should_match'] = 1 - + # Get number of committers, this period query['aggs'] = { 'timeseries': { @@ -164,7 +164,7 @@ def run(API, environ, indata, session): size = 0, body = query ) - + timeseries = [] for bucket in res['aggregations']['timeseries']['buckets']: ts = int(bucket['key'] / 1000) @@ -174,7 +174,7 @@ def run(API, environ, indata, session): 'topics': bucket['topics']['value'], 'authors': bucket['authors']['value'] }) - + JSON_OUT = { 'widgetType': { 'chartType': 'bar' # Recommendation for the UI diff --git a/api/pages/mail/top-authors.py b/api/pages/mail/top-authors.py index 52da07df..c2f2cb21 100644 --- a/api/pages/mail/top-authors.py +++ b/api/pages/mail/top-authors.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows the top N of email authors -# +# ######################################################################## @@ -75,27 +75,27 @@ ROBITS = r"(git|jira|jenkins|gerrit)@" def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -125,7 +125,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Get top 25 committers this period query['aggs'] = { 'authors': { @@ -133,7 +133,7 @@ def run(API, environ, indata, session): 'field': 'sender', 'size': 30 } - } + } } res = session.DB.ES.search( index=session.DB.dbname, @@ -147,7 +147,7 @@ def run(API, environ, indata, session): email = bucket['key'] # By default, we want to see humans, not bots on this list! 
if re.match(ROBITS, email): - continue + continue count = bucket['doc_count'] sha = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('utf-8') ).hexdigest() if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha): @@ -161,12 +161,12 @@ def run(API, environ, indata, session): people[email] = person people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest() people[email]['count'] = count - + topN = [] for email, person in people.items(): topN.append(person) topN = sorted(topN, key = lambda x: x['count'], reverse = True) - + JSON_OUT = { 'topN': { 'denoter': 'emails', diff --git a/api/pages/mail/top-topics.py b/api/pages/mail/top-topics.py index 9acda6cf..c9af57cc 100644 --- a/api/pages/mail/top-topics.py +++ b/api/pages/mail/top-topics.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows the top N of email authors -# +# ######################################################################## @@ -72,27 +72,27 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - + interval = indata.get('interval', 'month') - - + + #################################################################### #################################################################### dOrg = session.user['defaultOrganisation'] or "apache" @@ -125,7 +125,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="mailtop", @@ -139,8 +139,8 @@ def run(API, environ, indata, session): 'source': bucket['_source']['sourceURL'], 'name': bucket['_source']['subject'], 'count': bucket['_source']['emails'] - }) - + }) + JSON_OUT = { 'topN': { 'denoter': 'emails', diff --git a/api/pages/mail/trends.py b/api/pages/mail/trends.py index ac0d5186..baa1f1d4 100644 --- a/api/pages/mail/trends.py +++ b/api/pages/mail/trends.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows a quick email trend summary of the past 6 months for your org -# +# ######################################################################## @@ -72,30 +72,30 @@ import datetime def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - + + dOrg = session.user['defaultOrganisation'] or "apache" - + #################################################################### # We start by doing all the queries for THIS period. # # Then we reset the query, and change date to yonder-->from # @@ -129,8 +129,8 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['must'].append({'term': {'sender': indata.get('email')}}) - - + + # Get number of threads and emails, this period query['aggs'] = { 'topics': { @@ -152,10 +152,10 @@ def run(API, environ, indata, session): ) no_topics = res['aggregations']['topics']['value'] no_emails = res['aggregations']['emails']['value'] - - + + # Authors - + query = { 'query': { 'bool': { @@ -184,7 +184,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['must'].append({'term': {'sender': indata.get('email')}}) - + # Get number of authors, this period query['aggs'] = { 'authors': { @@ -200,9 +200,9 @@ def run(API, environ, indata, session): body = query ) no_authors = res['aggregations']['authors']['value'] - - - + + + #################################################################### # Change to PRIOR SPAN # #################################################################### @@ -234,8 +234,8 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['must'].append({'term': {'sender': indata.get('email')}}) - - + + # Get number of threads and emails, this period query['aggs'] = { 'topics': { @@ -257,10 +257,10 @@ def run(API, environ, indata, session): ) no_topics_before = res['aggregations']['topics']['value'] no_emails_before = res['aggregations']['emails']['value'] - - + + # Authors - + query = { 'query': { 'bool': { @@ -289,7 +289,7 @@ def run(API, environ, indata, session): query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) if indata.get('email'): query['query']['bool']['must'].append({'term': {'sender': indata.get('email')}}) - + # Get number of authors, this period query['aggs'] = { 'authors': { @@ -305,10 +305,10 @@ def run(API, environ, indata, session): body = query ) no_authors_before = res['aggregations']['authors']['value'] - - - - + + + + trends = { "authors": { 'before': no_authors_before, @@ -326,7 +326,7 @@ def run(API, environ, indata, session): 'title': "Emails sent this period" } } - + JSON_OUT = { 'trends': trends, 'okay': True, diff --git a/api/pages/org/contributors.py b/api/pages/org/contributors.py index 9210dfff..3d243622 100644 --- a/api/pages/org/contributors.py +++ b/api/pages/org/contributors.py @@ -39,7 +39,7 @@ # security: # - cookieAuth: [] # summary: Shows contributors for the entire org or matching filters. 
-# +# ######################################################################## @@ -57,20 +57,20 @@ cached_people = {} # Store people we know, so we don't have to fetch them again. def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - - + + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + # Fetch all contributors for the org dOrg = session.user['defaultOrganisation'] or "apache" query = { @@ -86,13 +86,13 @@ def run(API, environ, indata, session): } } } - + # Source-specific or view-specific?? if indata.get('source'): query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) elif viewList: query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + # Date specific? dateTo = indata.get('to', int(time.time())) dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span @@ -108,7 +108,7 @@ def run(API, environ, indata, session): ) emails = [] contribs = {} - + for field in ['sender', 'author_email', 'issueCreator', 'issueCloser']: N = 0 while N < 5: @@ -121,7 +121,7 @@ def run(API, environ, indata, session): 'partition': N, 'num_partitions': 5 }, - } + } } } res = session.DB.ES.search( @@ -139,7 +139,7 @@ def run(API, environ, indata, session): emails.append(k['key']) contribs[k['key']] = contribs.get(k['key'], 0) + k['doc_count'] N += 1 - + people = [] for email in emails: pid = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('ascii', errors='replace')).hexdigest() @@ -160,7 +160,7 @@ def run(API, environ, indata, session): if person: person['contributions'] = contribs.get(email, 0) people.append(person) - + JSON_OUT = { 'people': people, 'okay': True diff --git a/api/pages/org/list.py b/api/pages/org/list.py index 28d6d6f5..bf28f4d6 100644 --- a/api/pages/org/list.py +++ b/api/pages/org/list.py @@ -83,7 +83,7 @@ # security: # - cookieAuth: [] # summary: Create a new organisation -# +# ######################################################################## @@ -102,7 +102,7 @@ def run(API, environ, indata, session): # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint!") - + method = environ['REQUEST_METHOD'] # Are we making a new org? 
if method == "PUT": @@ -112,7 +112,7 @@ def run(API, environ, indata, session): orgid = indata.get('id', str(int(time.time()))) if session.DB.ES.exists(index=session.DB.dbname, doc_type='organisation', id = orgid): raise API.exception(403, "Organisation ID already in use!") - + doc = { 'id': orgid, 'name': orgname, @@ -125,7 +125,7 @@ def run(API, environ, indata, session): return else: raise API.exception(403, "Only administrators can create new organisations.") - + #################################################################### orgs = [] if session.user['userlevel'] == "admin": @@ -168,8 +168,8 @@ def run(API, environ, indata, session): doc['_source']['sourceCount'] = numSources doc['_source']['docCount'] = numDocs orgs.append(doc['_source']) - - + + JSON_OUT = { 'organisations': orgs, 'okay': True, diff --git a/api/pages/org/members.py b/api/pages/org/members.py index 3b58852c..b749b2fd 100644 --- a/api/pages/org/members.py +++ b/api/pages/org/members.py @@ -106,7 +106,7 @@ # security: # - cookieAuth: [] # summary: Remove a person from an organisation -# +# ######################################################################## @@ -125,7 +125,7 @@ def canInvite(session): """ Determine if the user can edit sources in this org """ if session.user['userlevel'] == 'admin': return True - + dOrg = session.user['defaultOrganisation'] or "apache" if session.DB.ES.exists(index=session.DB.dbname, doc_type="organisation", id= dOrg): xorg = session.DB.ES.get(index=session.DB.dbname, doc_type="organisation", id= dOrg)['_source'] @@ -138,9 +138,9 @@ def run(API, environ, indata, session): # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint!") - + method = environ['REQUEST_METHOD'] - + ################################################# # Inviting a new member? # ################################################# @@ -152,18 +152,18 @@ def run(API, environ, indata, session): # Make sure the org exists if not session.DB.ES.exists(index=session.DB.dbname, doc_type='organisation', id = orgid): raise API.exception(403, "No such organisation!") - + # make sure the user account exists if not session.DB.ES.exists(index=session.DB.dbname, doc_type='useraccount', id = newmember): raise API.exception(403, "No such user!") - + # Modify user account doc = session.DB.ES.get(index=session.DB.dbname, doc_type='useraccount', id = newmember) if orgid not in doc['_source']['organisations']: # No duplicates, please doc['_source']['organisations'].append(orgid) session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = newmember, body = doc['_source']) - - + + # Get org doc from ES doc = session.DB.ES.get(index=session.DB.dbname, doc_type='organisation', id = orgid) if isadmin: @@ -172,7 +172,7 @@ def run(API, environ, indata, session): # Override old doc session.DB.ES.index(index=session.DB.dbname, doc_type='organisation', id = orgid, body = doc['_source']) time.sleep(1) # Bleh!! - + # If an admin, and not us, and reinvited, we purge the admin bit elif newmember in doc['_source']['admins']: if newmember == session.user['email']: @@ -182,11 +182,11 @@ def run(API, environ, indata, session): session.DB.ES.index(index=session.DB.dbname, doc_type='organisation', id = orgid, body = doc['_source']) time.sleep(1) # Bleh!! 
yield json.dumps({"okay": True, "message": "Member invited!!"}) - + return else: raise API.exception(403, "Only administrators or organisation owners can invite new members.") - + ################################################# # DELETE: Remove a member # ################################################# @@ -195,25 +195,25 @@ def run(API, environ, indata, session): memberid = indata.get('email') isadmin = indata.get('admin', False) orgid = session.user['defaultOrganisation'] or "apache" - + # We can't remove ourselves! if memberid == session.user['email']: raise API.exception(403, "You can't remove yourself from an organisation.") - + # Make sure the org exists if not session.DB.ES.exists(index=session.DB.dbname, doc_type='organisation', id = orgid): raise API.exception(403, "No such organisation!") - + # make sure the user account exists if not session.DB.ES.exists(index=session.DB.dbname, doc_type='useraccount', id = memberid): raise API.exception(403, "No such user!") - + # Modify user account doc = session.DB.ES.get(index=session.DB.dbname, doc_type='useraccount', id = memberid) if orgid in doc['_source']['organisations']: # No duplicates, please doc['_source']['organisations'].remove(orgid) session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = memberid, body = doc['_source']) - + # Check is user is admin and remove if so # Get org doc from ES doc = session.DB.ES.get(index=session.DB.dbname, doc_type='organisation', id = orgid) @@ -222,12 +222,12 @@ def run(API, environ, indata, session): # Override old doc session.DB.ES.index(index=session.DB.dbname, doc_type='organisation', id = orgid, body = doc['_source']) time.sleep(1) # Bleh!! - + yield json.dumps({"okay": True, "message": "Member removed!"}) return else: raise API.exception(403, "Only administrators or organisation owners can invite new members.") - + ################################################# # GET/POST: Display members # @@ -236,11 +236,11 @@ def run(API, environ, indata, session): orgid = session.user['defaultOrganisation'] or "apache" if not session.DB.ES.exists(index=session.DB.dbname, doc_type='organisation', id = orgid): raise API.exception(403, "No such organisation!") - + # Only admins should be able to view this! 
if not canInvite(session): raise API.exception(403, "Only organisation owners can view this list.") - + # Find everyone affiliated with this org query = { 'query': { @@ -264,7 +264,7 @@ def run(API, environ, indata, session): members = [] for doc in res['hits']['hits']: members.append(doc['_id']) - + # Get org doc from ES doc = session.DB.ES.get(index=session.DB.dbname, doc_type='organisation', id = orgid) JSON_OUT = { diff --git a/api/pages/org/sourcetypes.py b/api/pages/org/sourcetypes.py index 9e5b8afe..6fa84025 100644 --- a/api/pages/org/sourcetypes.py +++ b/api/pages/org/sourcetypes.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Lists the available source types supported by Kibble -# +# ######################################################################## @@ -71,9 +71,7 @@ import json def run(API, environ, indata, session): - + types = yaml.load(open("yaml/sourcetypes.yaml")) - - yield json.dumps(types) - \ No newline at end of file + yield json.dumps(types) diff --git a/api/pages/org/trends.py b/api/pages/org/trends.py index d0188a41..4890c63f 100644 --- a/api/pages/org/trends.py +++ b/api/pages/org/trends.py @@ -56,7 +56,7 @@ # security: # - cookieAuth: [] # summary: Shows a quick trend summary of the past 6 months for your org -# +# ######################################################################## @@ -71,28 +71,28 @@ import time def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + now = time.time() - + # First, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): if session.DB.ES.exists(index=session.DB.dbname, doc_type="view", id = indata['view']): view = session.DB.ES.get(index=session.DB.dbname, doc_type="view", id = indata['view']) viewList = view['_source']['sourceList'] - + dateTo = int(time.time()) dateFrom = dateTo - (86400*30*3) # Default to a quarter if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - - + + + #################################################################### # We start by doing all the queries for THIS period. 
# # Then we reset the query, and change date to yonder-->from # @@ -120,7 +120,7 @@ def run(API, environ, indata, session): } } } - + # Get number of commits, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -128,8 +128,8 @@ def run(API, environ, indata, session): body = query ) no_commits = res['count'] - - + + # Get number of committers, this period query['aggs'] = { 'authors': { @@ -137,7 +137,7 @@ def run(API, environ, indata, session): 'field': 'author_email' } } - + } res = session.DB.ES.search( index=session.DB.dbname, @@ -146,8 +146,8 @@ def run(API, environ, indata, session): body = query ) no_authors = res['aggregations']['authors']['value'] - - + + #################################################################### # Change to PRIOR SPAN # #################################################################### @@ -173,7 +173,7 @@ def run(API, environ, indata, session): } } } - + # Get number of commits, this period res = session.DB.ES.count( index=session.DB.dbname, @@ -181,7 +181,7 @@ def run(API, environ, indata, session): body = query ) no_commits_before = res['count'] - + # Get number of committers, this period query['aggs'] = { 'authors': { @@ -197,8 +197,8 @@ def run(API, environ, indata, session): body = query ) no_authors_before = res['aggregations']['authors']['value'] - - + + trends = { "authors": { 'before': no_authors_before, @@ -211,7 +211,7 @@ def run(API, environ, indata, session): 'title': "Commits this quarter" } } - + JSON_OUT = { 'trends': trends, 'okay': True, diff --git a/api/pages/session.py b/api/pages/session.py index 425cd89a..d09daabe 100644 --- a/api/pages/session.py +++ b/api/pages/session.py @@ -85,7 +85,7 @@ # $ref: '#/components/schemas/Error' # description: unexpected error # summary: Log in -# +# ######################################################################## @@ -104,9 +104,9 @@ import uuid def run(API, environ, indata, session): - + method = environ['REQUEST_METHOD'] - + # Logging in? if method == "PUT": u = indata['email'] @@ -127,30 +127,30 @@ def run(API, environ, indata, session): session.DB.ES.index(index=session.DB.dbname, doc_type='uisession', id = session.cookie, body = sessionDoc) yield json.dumps({"message": "Logged in OK!"}) return - + # Fall back to a 403 if username and password did not match raise API.exception(403, "Wrong username or password supplied!") - - + + # We need to be logged in for the rest of this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + # Delete a session (log out) if method == "DELETE": session.DB.ES.delete(index=session.DB.dbname, doc_type='uisession', id = session.cookie) session.newCookie() yield json.dumps({"message": "Logged out, bye bye!"}) - + # Display the user data for this session if method == "GET": - + # Do we have an API key? If not, make one if not session.user.get('token') or indata.get('newtoken'): token = str(uuid.uuid4()) session.user['token'] = token session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = session.user['email'], body = session.user) - + # Run a quick search of all orgs we have. 
res = session.DB.ES.search( index=session.DB.dbname, @@ -162,12 +162,12 @@ def run(API, environ, indata, session): } } ) - + orgs = [] for hit in res['hits']['hits']: doc = hit['_source'] orgs.append(doc) - + JSON_OUT = { 'email': session.user['email'], 'displayName': session.user['displayName'], @@ -180,7 +180,6 @@ def run(API, environ, indata, session): } yield json.dumps(JSON_OUT) return - + # Finally, if we hit a method we don't know, balk! yield API.exception(400, "I don't know this request method!!") - \ No newline at end of file diff --git a/api/pages/sources.py b/api/pages/sources.py index 0a46756b..a15a55b0 100644 --- a/api/pages/sources.py +++ b/api/pages/sources.py @@ -114,7 +114,7 @@ # security: # - cookieAuth: [] # summary: Add a new source -# +# ######################################################################## @@ -133,7 +133,7 @@ def canModifySource(session): """ Determine if the user can edit sources in this org """ - + dOrg = session.user['defaultOrganisation'] or "apache" if session.DB.ES.exists(index=session.DB.dbname, doc_type="organisation", id= dOrg): xorg = session.DB.ES.get(index=session.DB.dbname, doc_type="organisation", id= dOrg)['_source'] @@ -144,30 +144,30 @@ def canModifySource(session): return False def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + method = environ['REQUEST_METHOD'] dOrg = session.user['defaultOrganisation'] - + if method in ['GET', 'POST']: # Fetch organisation data - + # Make sure we have a default/current org set if 'defaultOrganisation' not in session.user or not session.user['defaultOrganisation']: raise API.exception(400, "You must specify an organisation as default/current in order to add sources.") - + if session.DB.ES.exists(index=session.DB.dbname, doc_type="organisation", id= dOrg): org = session.DB.ES.get(index=session.DB.dbname, doc_type="organisation", id= dOrg)['_source'] del org['admins'] else: raise API.exception(404, "No such organisation, '%s'" % (dOrg or "(None)")) - + sourceTypes = indata.get('types', []) # Fetch all sources for default org - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="source", @@ -180,15 +180,15 @@ def run(API, environ, indata, session): } } ) - + # Secondly, fetch the view if we have such a thing enabled viewList = [] if indata.get('view'): viewList = session.getView(indata.get('view')) if indata.get('subfilter') and indata.get('quick'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - + viewList = session.subFilter(indata.get('subfilter'), view = viewList) + + sources = [] for hit in res['hits']['hits']: doc = hit['_source'] @@ -208,7 +208,7 @@ def run(API, environ, indata, session): if 'creds' in doc: del doc['creds'] sources.append(doc) - + JSON_OUT = { 'sources': sources, 'okay': True, @@ -216,7 +216,7 @@ def run(API, environ, indata, session): } yield json.dumps(JSON_OUT) return - + # Add one or more sources if method == "PUT": if canModifySource(session): @@ -234,11 +234,11 @@ def run(API, environ, indata, session): if el in source and len(source[el]) > 0: creds[el] = source[el] sourceID = hashlib.sha224( ("%s-%s" % (sourceType, sourceURL)).encode('utf-8') ).hexdigest() - + # Make sure we have a default/current org set if 'defaultOrganisation' not in session.user or not session.user['defaultOrganisation']: raise API.exception(400, "You must first specify an organisation as default/current in order to add 
sources.") - + doc = { 'organisation': dOrg, 'sourceURL': sourceURL, @@ -259,7 +259,7 @@ def run(API, environ, indata, session): }) else: raise API.exception(403, "You don't have permission to add sources to this organisation.") - + # Delete a source if method == "DELETE": if canModifySource(session): @@ -277,7 +277,7 @@ def run(API, environ, indata, session): raise API.exception(404, "No such source item") else: raise API.exception(403, "You don't have permission to delete this source.") - + # Edit a source if method == "PATCH": pass diff --git a/api/pages/verify.py b/api/pages/verify.py index 0b4d7071..8ef6f4f1 100644 --- a/api/pages/verify.py +++ b/api/pages/verify.py @@ -45,7 +45,7 @@ # application/json: # schema: # $ref: '#/components/schemas/Error' -# +# ######################################################################## @@ -58,17 +58,17 @@ def run(API, environ, indata, session): - + # Get vocde, make sure it's 40 chars vcode = indata.get('vcode') if len(vcode) != 40: raise API.exception(400, "Invalid verification code!") - + # Find the account with this vcode email = indata.get('email') if len(email) < 7: raise API.exception(400, "Invalid email address presented.") - + if session.DB.ES.exists(index=session.DB.dbname, doc_type='useraccount', id = email): doc = session.DB.ES.get(index=session.DB.dbname, doc_type='useraccount', id = email) # Do the codes match?? @@ -81,4 +81,3 @@ def run(API, environ, indata, session): raise API.exception(404, "Invalid verification code presented!") else: raise API.exception(404, "Invalid verification code presented!") # Don't give away if such a user exists, pssst - \ No newline at end of file diff --git a/api/pages/views.py b/api/pages/views.py index 5898e110..bc619f21 100644 --- a/api/pages/views.py +++ b/api/pages/views.py @@ -128,7 +128,7 @@ # security: # - cookieAuth: [] # summary: Add a new view -# +# ######################################################################## @@ -145,14 +145,14 @@ import hashlib def run(API, environ, indata, session): - + # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + method = environ['REQUEST_METHOD'] dOrg = session.user['defaultOrganisation'] or "apache" - + # Are we adding a view? if method == 'PUT': viewID = hashlib.sha224( ("%s-%s-%s" % (time.time(), session.user['email'], dOrg) ).encode('utf-8') ).hexdigest() @@ -173,7 +173,7 @@ def run(API, environ, indata, session): } session.DB.ES.index(index=session.DB.dbname, doc_type="view", id = viewID, body = doc) yield json.dumps({'okay': True, 'message': "View created"}) - + # Are we editing (patching) a view? if method == 'PATCH': viewID = indata.get('id') @@ -188,7 +188,7 @@ def run(API, environ, indata, session): raise API.exception(403, "You don't own this view, and cannot edit it.") else: raise API.exception(404, "We couldn't find a view with this ID.") - + # Removing a view? if method == 'DELETE': viewID = indata.get('id') @@ -201,11 +201,11 @@ def run(API, environ, indata, session): raise API.exception(403, "You don't own this view, and cannot delete it.") else: raise API.exception(404, "We couldn't find a view with this ID.") - - + + if method in ['GET', 'POST']: # Fetch all views for default org - + res = session.DB.ES.search( index=session.DB.dbname, doc_type="view", @@ -218,8 +218,8 @@ def run(API, environ, indata, session): } } ) - - + + # Are we looking at someone elses view? 
if indata.get('view'): viewID = indata.get('view') @@ -229,7 +229,7 @@ def run(API, environ, indata, session): blob['_source']['name'] += " (shared by " + blob['_source']['email'] + ")" res['hits']['hits'].append(blob) sources = [] - + # Include public views?? if not indata.get('sources', False): pres = session.DB.ES.search( @@ -259,7 +259,7 @@ def run(API, environ, indata, session): if hit['_source']['email'] != session.user['email']: hit['_source']['name'] += " (shared view)" res['hits']['hits'].append(hit) - + for hit in res['hits']['hits']: doc = hit['_source'] if doc['organisation'] != dOrg: @@ -273,7 +273,7 @@ def run(API, environ, indata, session): sources.append(xdoc) else: sources.append(doc) - + allsources = [] if indata.get('sources', False): res = session.DB.ES.search( @@ -296,7 +296,7 @@ def run(API, environ, indata, session): 'sourceURL': doc['sourceURL'] } allsources.append(xdoc) - + JSON_OUT = { 'views': sources, 'sources': allsources, diff --git a/api/pages/widgets.py b/api/pages/widgets.py index cebda02a..df5d4389 100644 --- a/api/pages/widgets.py +++ b/api/pages/widgets.py @@ -50,12 +50,12 @@ import json def run(API, environ, indata, session): - + if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - + widgets = yaml.load(open("yaml/widgets.yaml")) - + page = indata['pageid'] if not page or page == '0': page = widgets.get('defaultWidget', 'repos') @@ -63,5 +63,3 @@ def run(API, environ, indata, session): yield json.dumps(widgets['widgets'][page]) else: raise API.exception(404, "Widget design not found!") - - \ No newline at end of file diff --git a/api/plugins/database.py b/api/plugins/database.py index 80b94dd1..395808d4 100644 --- a/api/plugins/database.py +++ b/api/plugins/database.py @@ -34,7 +34,7 @@ class KibbleESWrapper(object): """ def __init__(self, ES): self.ES = ES - + def get(self, index, doc_type, id): return self.ES.get(index = index+'_'+doc_type, doc_type = '_doc', id = id) def exists(self, index, doc_type, id): @@ -72,7 +72,7 @@ class KibbleESWrapperSeven(object): """ def __init__(self, ES): self.ES = ES - + def get(self, index, doc_type, id): return self.ES.get(index = index+'_'+doc_type, id = id) def exists(self, index, doc_type, id): @@ -100,7 +100,7 @@ def count(self, index, doc_type = '*', body = None): index = index+'_'+doc_type, body = body ) - + class KibbleDatabase(object): def __init__(self, config): @@ -117,7 +117,7 @@ def __init__(self, config): max_retries=5, retry_on_timeout=True ) - + # IMPORTANT BIT: Figure out if this is ES < 6.x, 6.x or >= 7.x. # If so, we're using the new ES DB mappings, and need to adjust ALL # ES calls to match this. @@ -126,4 +126,3 @@ def __init__(self, config): self.ES = KibbleESWrapperSeven(self.ES) elif self.ESVersion >= 6: self.ES = KibbleESWrapper(self.ES) - diff --git a/api/plugins/openapi.py b/api/plugins/openapi.py index ba6153a5..f2a7e3f0 100644 --- a/api/plugins/openapi.py +++ b/api/plugins/openapi.py @@ -48,7 +48,7 @@ def __init__(self, message): 'POST': '#49cc5c', 'PATCH': '#d5a37e' } - + class OpenAPI(): def __init__(self, APIFile): """ Instantiates an OpenAPI validator given a YAML specification""" @@ -56,26 +56,26 @@ def __init__(self, APIFile): self.API = json.load(open(APIFile)) else: self.API = yaml.load(open(APIFile)) - + def validateType(self, field, value, ftype): """ Validate a single field value against an expected type """ - + # Get type of value, convert to JSON name of type. 
pyType = type(value).__name__ jsonType = py2JSON[pyType] if pyType in py2JSON else pyType - + # Check if type matches if ftype != jsonType: raise OpenAPIException("OpenAPI mismatch: Field '%s' was expected to be %s, but was really %s!" % (field, ftype, jsonType)) - + def validateSchema(self, pdef, formdata, schema = None): """ Validate (sub)parameters against OpenAPI specs """ - + # allOf: list of schemas to validate against if 'allOf' in pdef: for subdef in pdef['allOf']: self.validateSchema(subdef, formdata) - + where = "JSON body" # Symbolic link?? if 'schema' in pdef: @@ -86,13 +86,13 @@ def validateSchema(self, pdef, formdata, schema = None): # #/foo/bar/baz --> dict['foo']['bar']['baz'] pdef = functools.reduce(operator.getitem, schema.split('/')[1:], self.API) where = "item matching schema %s" % schema - + # Check that all required fields are present if 'required' in pdef: for field in pdef['required']: if not field in formdata: raise OpenAPIException("OpenAPI mismatch: Missing input field '%s' in %s!" % (field, where)) - + # Now check for valid format of input data for field in formdata: if 'properties' not in pdef or field not in pdef['properties'] : @@ -101,7 +101,7 @@ def validateSchema(self, pdef, formdata, schema = None): raise OpenAPIException("OpenAPI mismatch: Field '%s' was found in api.yaml, but no format was specified in specs!" % field) ftype = pdef['properties'][field]['type'] self.validateType(field, formdata[field], ftype) - + # Validate sub-arrays if ftype == 'array' and 'items' in pdef['properties'][field]: for item in formdata[field]: @@ -109,17 +109,17 @@ def validateSchema(self, pdef, formdata, schema = None): self.validateSchema(pdef['properties'][field]['items'], item) else: self.validateType(field, formdata[field], pdef['properties'][field]['items']['type']) - + # Validate sub-hashes if ftype == 'hash' and 'schema' in pdef['properties'][field]: self.validateSchema(pdef['properties'][field], formdata[field]) def validateParameters(self, defs, formdata): # pass - + def validate(self, method = "GET", path = "/foo", formdata = None): """ Validate the request method and input data against the OpenAPI specification """ - + # Make sure we're not dealing with a dynamic URL. # If we find /foo/{key}, we fold that into the form data # and process as if it's a json input field for now. @@ -132,7 +132,7 @@ def validate(self, method = "GET", path = "/foo", formdata = None): formdata[k] = v path = xpath break - + if self.API['paths'].get(path): defs = self.API['paths'].get(path) method = method.lower() @@ -143,15 +143,15 @@ def validate(self, method = "GET", path = "/foo", formdata = None): elif formdata and 'requestBody' not in mdefs: raise OpenAPIException("OpenAPI mismatch: JSON data is now allowed for this request type") elif formdata and 'requestBody' in mdefs and 'content' in mdefs['requestBody']: - + # SHORTCUT: We only care about JSON input for Kibble! 
Disregard other types if not 'application/json' in mdefs['requestBody']['content']: raise OpenAPIException ("OpenAPI mismatch: API endpoint accepts input, but no application/json definitions found in api.yaml!") jdefs = mdefs['requestBody']['content']['application/json'] - + # Check that required params are here self.validateSchema(jdefs, formdata) - + else: raise OpenAPIException ("OpenAPI mismatch: Method %s is not registered for this API" % method) else: @@ -184,7 +184,7 @@ def dumpExamples(self, pdef, array = False): else: js[k], foo = self.dumpExamples(v['items']) return [js if not array else [js], desc] - + def toHTML(self): """ Blurps out the specs in a pretty HTML blob """ print(""" @@ -217,7 +217,7 @@ def toHTML(self): xjs, desc = self.dumpExamples(pdef) js = json.dumps(xjs, indent = 4) resp += "
%s:\n%s
\n
\n" % (code, js) - + if 'requestBody' in mspec: for ctype, pdef in mspec['requestBody']['content'].items(): xjs, desc = self.dumpExamples(pdef) @@ -226,18 +226,18 @@ def toHTML(self): inpvars += "%s: (%s) %s
\n" % (k, v[0], v[1]) js = json.dumps(xjs, indent = 4) inp += "

Input examples:

%s:\n%s
\n
" % (ctype, js) - + if inpvars: inpvars = "
%s
\n
" % inpvars - + print("""
- +
%s
- + %s
@@ -256,4 +256,4 @@ def toHTML(self):
""" % (linkname, mcolors[method], mcolors[method], mcolors[method], method, path, summary, "block" if inp else "none", inpvars, inp, resp)) #print("%s %s: %s" % (method.upper(), path, mspec['summary'])) - print("") \ No newline at end of file + print("") diff --git a/api/plugins/session.py b/api/plugins/session.py index 68614e13..7d4522bf 100644 --- a/api/plugins/session.py +++ b/api/plugins/session.py @@ -32,13 +32,13 @@ import time class KibbleSession(object): - + def getView(self, viewID): if self.DB.ES.exists(index=self.DB.dbname, doc_type="view", id = viewID): view = self.DB.ES.get(index=self.DB.dbname, doc_type="view", id = viewID) return view['_source']['sourceList'] return [] - + def subFilter(self, subfilter, view = []): if len(subfilter) == 0: return view @@ -57,7 +57,7 @@ def subFilter(self, subfilter, view = []): } }] } - + } } ) @@ -70,7 +70,7 @@ def subFilter(self, subfilter, view = []): if not sources: sources = ['x'] # blank return to not show eeeeverything return sources - + def subType(self, stype, view = []): if len(stype) == 0: return view @@ -96,7 +96,7 @@ def subType(self, stype, view = []): } ] } - + } } ) @@ -109,7 +109,7 @@ def subType(self, stype, view = []): if not sources: sources = ['x'] # blank return to not show eeeeverything return sources - + def logout(self): """Log out user and wipe cookie""" if self.user and self.cookie: @@ -138,11 +138,11 @@ def __init__(self, DB, environ, config): self.DB = DB self.headers = [('Content-Type', 'application/json; charset=utf-8')] self.cookie = None - + # Construct the URL we're visiting self.url = "%s://%s" % (environ['wsgi.url_scheme'], environ.get('HTTP_HOST', environ.get('SERVER_NAME'))) self.url += environ.get('SCRIPT_NAME', '/') - + # Get Kibble cookie cookie = None cookies = None @@ -184,4 +184,3 @@ def __init__(self, DB, environ, config): if not cookie: self.newCookie() self.cookie = cookie - \ No newline at end of file diff --git a/api/yaml/openapi/combine.py b/api/yaml/openapi/combine.py index 11afc958..7f5bc545 100644 --- a/api/yaml/openapi/combine.py +++ b/api/yaml/openapi/combine.py @@ -64,7 +64,7 @@ def deconstruct(): f.write("\n\n") f.write(contents) f.close() - + print("Dumping security components...") for basetype, bdefs in yml['components'].items(): for schema, defs in bdefs.items(): @@ -81,7 +81,7 @@ def deconstruct(): f.write(yaml.dump(defs, default_flow_style=False)) f.close() print("Dumped %u definitions." 
% noDefs) - + def construct(): yml = {} yml['paths'] = {} @@ -138,7 +138,7 @@ def construct(): f.write(yaml.dump(yml, default_flow_style=False)) f.close() print("All done!") - + if len(sys.argv) > 1 and sys.argv[1] == 'deconstruct': deconstruct() else: diff --git a/api/yaml/openapi/components/schemas/OrgMembers.yaml b/api/yaml/openapi/components/schemas/OrgMembers.yaml index 588e89fa..c4196708 100644 --- a/api/yaml/openapi/components/schemas/OrgMembers.yaml +++ b/api/yaml/openapi/components/schemas/OrgMembers.yaml @@ -13,5 +13,3 @@ properties: required: - admins - members - - diff --git a/api/yaml/openapi/components/schemas/Organisation.yaml b/api/yaml/openapi/components/schemas/Organisation.yaml index abd66cc5..3c7c8a9a 100644 --- a/api/yaml/openapi/components/schemas/Organisation.yaml +++ b/api/yaml/openapi/components/schemas/Organisation.yaml @@ -29,4 +29,3 @@ properties: required: - id - name - diff --git a/api/yaml/openapi/components/schemas/PhraseList.yaml b/api/yaml/openapi/components/schemas/PhraseList.yaml index e2df69b2..701294ef 100644 --- a/api/yaml/openapi/components/schemas/PhraseList.yaml +++ b/api/yaml/openapi/components/schemas/PhraseList.yaml @@ -12,4 +12,3 @@ properties: required: - okay - phrases - diff --git a/api/yaml/openapi/components/schemas/SourceListAdd.yaml b/api/yaml/openapi/components/schemas/SourceListAdd.yaml index 8d82be0b..c1629dd6 100644 --- a/api/yaml/openapi/components/schemas/SourceListAdd.yaml +++ b/api/yaml/openapi/components/schemas/SourceListAdd.yaml @@ -29,7 +29,7 @@ properties: "cookie": "ponycookie" } } - + ] } required: diff --git a/api/yaml/openapi/components/schemas/defaultWidgetArgs.yaml b/api/yaml/openapi/components/schemas/defaultWidgetArgs.yaml index 433b9262..8b74cd7c 100644 --- a/api/yaml/openapi/components/schemas/defaultWidgetArgs.yaml +++ b/api/yaml/openapi/components/schemas/defaultWidgetArgs.yaml @@ -79,4 +79,3 @@ properties: description: Enables relative comparison mode for API endpoints that have this feature. type: boolean example: false - \ No newline at end of file diff --git a/api/yaml/sourcetypes.yaml b/api/yaml/sourcetypes.yaml index 9683c51c..d2b4e018 100644 --- a/api/yaml/sourcetypes.yaml +++ b/api/yaml/sourcetypes.yaml @@ -6,7 +6,7 @@ git: optauth: - username - password - + github: title: "GitHub repository (plus issues/PRs)" description: "This is GitHub repositories with issues and pull requests. For non-GitHub repos, please use the plain 'git' source type" @@ -25,7 +25,7 @@ jira: authrequired: true optauth: - username - - password + - password bugzilla: title: "BugZilla Project" @@ -92,7 +92,7 @@ twitter: - token_secret - consumer_key - consumer_secret - + discourse: title: Discourse description: A Discourse Forum System. @@ -100,4 +100,4 @@ discourse: example: https://discourse.example.com/ optauth: - username - - password \ No newline at end of file + - password diff --git a/docs/Makefile b/docs/Makefile index dd5b2a84..bacc19da 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -17,4 +17,4 @@ help: # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/source/conf.py b/docs/source/conf.py index bc9edb23..54491833 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -165,6 +165,3 @@ author, 'ApacheKibble', 'One line description of project.', 'Miscellaneous'), ] - - - diff --git a/docs/source/managing.rst b/docs/source/managing.rst index dd577519..65c39510 100644 --- a/docs/source/managing.rst +++ b/docs/source/managing.rst @@ -90,7 +90,7 @@ BugZilla JIRA This is a JIRA project. Most JIRA instances will require the login credentials of an anonymous account in order to perform API calls. - + Twitter This is a Twitter account. Currently not much done there. WIP. diff --git a/docs/source/usecases.rst b/docs/source/usecases.rst index da941b96..629058f1 100644 --- a/docs/source/usecases.rst +++ b/docs/source/usecases.rst @@ -10,10 +10,10 @@ Add an Organisation ********************** This use case describes the process of adding an organisation -Actors: +Actors: User -Precondition: +Precondition: User is logged in Flow of Events: @@ -23,11 +23,11 @@ Flow of Events: 4. The system will verify the information. 5. The system will add the new organisation. 6. The system will then display the new organisation along with any existing organisations. - + Exception Scenario: The user does not enter an organisation name or description. -Post Conditions: +Post Conditions: The user creates the organisation or leaves the page. @@ -36,10 +36,10 @@ Add a View ********************** This use case describes the process of adding a view to an organisation -Actors: +Actors: User -Precondition: +Precondition: User is logged in and has an organisation created Flow of Events: @@ -51,23 +51,23 @@ Flow of Events: 6. The system will add the new view. 7. The system will then display the new view along with any existing views. 8. The user with then be able to edit or delete the view. - + Exception Scenario: The user does not enter a view name. -Post Conditions: +Post Conditions: The user creates the source or leaves the page. - + ********************** Add a Source ********************** This use case describes the process of adding a source to an organisation -Actors: +Actors: User -Precondition: +Precondition: User is logged in and has an organisation created Flow of Events: @@ -79,23 +79,23 @@ Flow of Events: 6. The system will add the new source. 7. The system will then display the new source along with any existing sources. 8. The user with then have to run the kibble scanner to process the new source. - + Exception Scenario: The user does not enter a source URL/ID. -Post Conditions: +Post Conditions: The user creates the source or leaves the page. - + ********************** Add a User ********************** This use case describes the process of adding a user to an organisation -Actors: +Actors: User -Precondition: +Precondition: User is logged in and has an organisation created Flow of Events: @@ -104,11 +104,9 @@ Flow of Events: 3. The user will enter the email address of a user. 4. The system will verify the information. 5. The system will add the user to the organisation's membership. - + Exception Scenario: The user enters a user that does not exist. -Post Conditions: +Post Conditions: The user invites a member or leaves the page. 
- - diff --git a/setup/kibble.yaml.sample b/setup/kibble.yaml.sample index c523c9e3..c414b5ed 100644 --- a/setup/kibble.yaml.sample +++ b/setup/kibble.yaml.sample @@ -3,7 +3,7 @@ elasticsearch: port: 9200 ssl: false dbname: kibble - + mail: mailhost: localhost mailport: 25 @@ -17,4 +17,3 @@ accounts: - domain: apache.org organisation: apache - diff --git a/setup/makeaccount.py b/setup/makeaccount.py index 67b41516..866c8eeb 100644 --- a/setup/makeaccount.py +++ b/setup/makeaccount.py @@ -74,4 +74,3 @@ def __init__(self, config): } DB.ES.index(index=DB.dbname, doc_type='useraccount', id = username, body = doc) print("Account created!") - diff --git a/setup/setup.py b/setup/setup.py index 4a40fae1..92246754 100644 --- a/setup/setup.py +++ b/setup/setup.py @@ -172,7 +172,7 @@ def create_es_index( # person: contributor DB 'person', ] - + for t in types: iname = f"{dbname}_{t}" print(f"Creating index {iname}") diff --git a/ui/apidoc.html b/ui/apidoc.html index 218050f4..833b7cde 100644 --- a/ui/apidoc.html +++ b/ui/apidoc.html @@ -107,16 +107,16 @@

Overview:

    Input examples:

    application/json:
     {
    @@ -141,14 +141,14 @@ 

    Response examples:

- +
- +
PATCH
- + /api/account
@@ -190,14 +190,14 @@

Response examples:

- +
- +
PUT
- + /api/account
@@ -235,23 +235,23 @@

Response examples:

- +
- +
GET
- + /api/bio/bio
Shows some facts about a contributor

JSON parameters:

- +
- +

Response examples:

@@ -268,14 +268,14 @@

Response examples:

- +
- +
POST
- + /api/bio/bio
@@ -334,23 +334,23 @@

Response examples:

- + - +
- +
POST
- + /api/bio/trends
@@ -433,23 +433,23 @@

Response examples:

- +
- +
GET
- + /api/code/changes
Show insertions/deletions as a timeseries

JSON parameters:

- +
- +

Response examples:

@@ -474,14 +474,14 @@

Response examples:

- +
- +
POST
- + /api/code/changes
@@ -548,23 +548,23 @@

Response examples:

- +
- +
GET
- + /api/code/commits
Show commits as a timeseries

JSON parameters:

- +
- +

Response examples:

@@ -589,14 +589,14 @@

Response examples:

- +
- +
POST
- + /api/code/commits
@@ -663,23 +663,23 @@

Response examples:

- +
- +
GET
- + /api/code/committers
Shows the top N committers

JSON parameters:

- +
- +

Response examples:

@@ -696,14 +696,14 @@

Response examples:

- +
- +
POST
- + /api/code/committers
@@ -762,23 +762,23 @@

Response examples:

- +
- +
GET
- + /api/code/evolution
Show code evolution as a timeseries

JSON parameters:

- +
- +

Response examples:

@@ -803,14 +803,14 @@

Response examples:

- +
- +
POST
- + /api/code/evolution
@@ -877,23 +877,23 @@

Response examples:

- +
- +
GET
- + /api/code/pony
Shows pony factor data for a set of repos over a given period of time

JSON parameters:

- +
- +

Response examples:

@@ -914,14 +914,14 @@

Response examples:

- +
- +
POST
- + /api/code/pony
@@ -984,23 +984,23 @@

Response examples:

- +
- +
GET
- + /api/code/pony-timeseries
Shows a timeseries of Pony Factor

JSON parameters:

- +
- +

Response examples:

@@ -1025,14 +1025,14 @@

Response examples:

- +
- +
POST
- + /api/code/pony-timeseries
@@ -1099,23 +1099,23 @@

Response examples:

- +
- +
GET
- + /api/code/relationships
Shows a breakdown of contributor relationships between repositories

JSON parameters:

- +
- +

Response examples:

@@ -1132,14 +1132,14 @@

Response examples:

- +
- +
POST
- + /api/code/relationships
@@ -1198,23 +1198,23 @@

Response examples:

- +
- +
GET
- + /api/code/retention
Shows retention metrics for a set of repos over a given period of time

JSON parameters:

- +
- +

Response examples:

@@ -1235,14 +1235,14 @@

Response examples:

- +
- +
POST
- + /api/code/retention
@@ -1305,23 +1305,23 @@

Response examples:

- +
- +
GET
- + /api/code/sloc
Shows a breakdown of lines of code for one or more sources

JSON parameters:

- +
- +

Response examples:

@@ -1338,14 +1338,14 @@

Response examples:

- +
- +
POST
- + /api/code/sloc
@@ -1404,23 +1404,23 @@

Response examples:

- +
- +
GET
- + /api/code/top-commits
Shows top 25 repos by commit volume

JSON parameters:

- +
- +

Response examples:

@@ -1445,14 +1445,14 @@

Response examples:

- +
- +
POST
- + /api/code/top-commits
@@ -1519,23 +1519,23 @@

Response examples:

- +
- +
GET
- + /api/code/top-sloc
Shows top 25 repos by lines of code

JSON parameters:

- +
- +

Response examples:

@@ -1560,14 +1560,14 @@

Response examples:

- +
- +
POST
- + /api/code/top-sloc
@@ -1634,23 +1634,23 @@

Response examples:

- + - +
- +
POST
- + /api/code/trends
@@ -1733,23 +1733,23 @@

Response examples:

- +
- +
GET
- + /api/issue/actors
Shows a timeseries of the number of people opening and closing issues

JSON parameters:

- +
- +

Response examples:

@@ -1774,14 +1774,14 @@

Response examples:

- +
- +
POST
- + /api/issue/actors
@@ -1848,23 +1848,23 @@

Response examples:

- +
- +
GET
- + /api/issue/age
Shows a timeseries of the number of open tickets by age

JSON parameters:

- +
- +

Response examples:

@@ -1889,14 +1889,14 @@

Response examples:

- +
- +
POST
- + /api/issue/age
@@ -1963,23 +1963,23 @@

Response examples:

- +
- +
GET
- + /api/issue/closers
Shows the top N issue closers

JSON parameters:

- +
- +

Response examples:

@@ -1996,14 +1996,14 @@

Response examples:

- +
- +
POST
- + /api/issue/closers
@@ -2062,23 +2062,23 @@

Response examples:

- +
- +
GET
- + /api/issue/issues
Shows a timeseries of issues opened and closed

JSON parameters:

- +
- +

Response examples:

@@ -2103,14 +2103,14 @@

Response examples:

- +
- +
POST
- + /api/issue/issues
@@ -2177,23 +2177,23 @@

Response examples:

- +
- +
GET
- + /api/issue/openers
Shows the top N issue openers

JSON parameters:

- +
- +

Response examples:

@@ -2210,14 +2210,14 @@

Response examples:

- +
- +
POST
- + /api/issue/openers
@@ -2276,23 +2276,23 @@

Response examples:

- +
- +
GET
- + /api/issue/pony-timeseries
Shows a timeseries of Pony Factor

JSON parameters:

- +
- +

Response examples:

@@ -2317,14 +2317,14 @@

Response examples:

- +
- +
POST
- + /api/issue/pony-timeseries
@@ -2391,23 +2391,23 @@

Response examples:

- +
- +
GET
- + /api/issue/relationships
Shows a breakdown of contributor relationships between issue trackers

JSON parameters:

- +
- +

Response examples:

@@ -2424,14 +2424,14 @@

Response examples:

- +
- +
POST
- + /api/issue/relationships
@@ -2490,23 +2490,23 @@

Response examples:

- +
- +
GET
- + /api/issue/retention
Shows retention metrics for a set of issue trackers over a given period of time

JSON parameters:

- +
- +

Response examples:

@@ -2527,14 +2527,14 @@

Response examples:

- +
- +
POST
- + /api/issue/retention
@@ -2597,23 +2597,23 @@

Response examples:

- +
- +
GET
- + /api/issue/top
Shows the top N issues by interactions

JSON parameters:

- +
- +

Response examples:

@@ -2634,14 +2634,14 @@

Response examples:

- +
- +
POST
- + /api/issue/top
@@ -2704,23 +2704,23 @@

Response examples:

- +
- +
GET
- + /api/issue/top-count
Shows the top 25 issue trackers by issue count

JSON parameters:

- +
- +

Response examples:

@@ -2745,14 +2745,14 @@

Response examples:

- +
- +
POST
- + /api/issue/top-count
@@ -2819,23 +2819,23 @@

Response examples:

- + - +
- +
POST
- + /api/issue/trends
@@ -2918,23 +2918,23 @@

Response examples:

- +
- +
GET
- + /api/mail/map
Shows a breakdown of email author reply mappings

JSON parameters:

- +
- +

Response examples:

@@ -2951,14 +2951,14 @@

Response examples:

- +
- +
POST
- + /api/mail/map
@@ -3017,23 +3017,23 @@

Response examples:

- +
- +
GET
- + /api/mail/pony-timeseries
Shows a timeseries of Pony Factor

JSON parameters:

- +
- +

Response examples:

@@ -3058,14 +3058,14 @@

Response examples:

- +
- +
POST
- + /api/mail/pony-timeseries
@@ -3132,23 +3132,23 @@

Response examples:

- +
- +
GET
- + /api/mail/relationships
Shows a breakdown of contributor relationships between mailing lists

JSON parameters:

- +
- +

Response examples:

@@ -3165,14 +3165,14 @@

Response examples:

- +
- +
POST
- + /api/mail/relationships
@@ -3231,23 +3231,23 @@

Response examples:

- +
- +
GET
- + /api/mail/retention
Shows retention metrics for a set of mailing lists over a given period of time

JSON parameters:

- +
- +

Response examples:

@@ -3268,14 +3268,14 @@

Response examples:

- +
- +
POST
- + /api/mail/retention
@@ -3338,23 +3338,23 @@

Response examples:

- +
- +
GET
- + /api/mail/timeseries
Shows email sent over time

JSON parameters:

- +
- +

Response examples:

@@ -3379,14 +3379,14 @@

Response examples:

- +
- +
POST
- + /api/mail/timeseries
@@ -3453,23 +3453,23 @@

Response examples:

- +
- +
GET
- + /api/mail/timeseries-single
Shows email sent over time

JSON parameters:

- +
- +

Response examples:

@@ -3494,14 +3494,14 @@

Response examples:

- +
- +
POST
- + /api/mail/timeseries-single
@@ -3568,23 +3568,23 @@

Response examples:

- +
- +
GET
- + /api/mail/top-authors
Shows the top N of email authors

JSON parameters:

- +
- +

Response examples:

@@ -3601,14 +3601,14 @@

Response examples:

- +
- +
POST
- + /api/mail/top-authors
@@ -3667,23 +3667,23 @@

Response examples:

- +
- +
GET
- + /api/mail/top-topics
Shows the top N email topics

JSON parameters:

- +
- +

Response examples:

@@ -3700,14 +3700,14 @@

Response examples:

- +
- +
POST
- + /api/mail/top-topics
@@ -3766,23 +3766,23 @@

Response examples:

- + - +
- +
POST
- + /api/mail/trends
@@ -3865,23 +3865,23 @@

Response examples:

- +
- +
GET
- + /api/org/list
Lists the organisations you belong to (or all, if admin)

JSON parameters:

- +
- +

Response examples:

@@ -3898,14 +3898,14 @@

Response examples:

- +
- +
POST
- + /api/org/list
@@ -3972,14 +3972,14 @@

Response examples:

- +
- +
PUT
- + /api/org/list
@@ -4017,14 +4017,14 @@

Response examples:

- +
- +
DELETE
- + /api/org/members
@@ -4066,23 +4066,23 @@

Response examples:

- +
- +
GET
- + /api/org/members
Lists the members of an organisation

JSON parameters:

- +
- +

Response examples:

@@ -4099,14 +4099,14 @@

Response examples:

- +
- +
POST
- + /api/org/members
@@ -4167,14 +4167,14 @@

Response examples:

- +
- +
PUT
- + /api/org/members
@@ -4216,23 +4216,23 @@

Response examples:

- +
- +
GET
- + /api/org/sourcetypes
Lists the available source types supported by Kibble

JSON parameters:

- +
- +

Response examples:

@@ -4249,14 +4249,14 @@

Response examples:

- +
- +
POST
- + /api/org/sourcetypes
@@ -4315,23 +4315,23 @@

Response examples:

- + - +
- +
POST
- + /api/org/trends
@@ -4414,14 +4414,14 @@

Response examples:

- +
- +
DELETE
- + /api/session
@@ -4454,23 +4454,23 @@

Response examples:

- +
- +
GET
- + /api/session
Display your login details

JSON parameters:

- +
- +

Response examples:

@@ -4496,14 +4496,14 @@

Response examples:

- +
- +
PUT
- + /api/session
@@ -4539,14 +4539,14 @@

Response examples:

- +
- +
DELETE
- + /api/sources
@@ -4567,23 +4567,23 @@

Response examples:

- +
- +
GET
- + /api/sources
Fetches a list of all sources for this organisation

JSON parameters:

- +
- +

Response examples:

@@ -4604,14 +4604,14 @@

Response examples:

- +
- +
PATCH
- + /api/sources
@@ -4649,14 +4649,14 @@

Response examples:

- +
- +
POST
- + /api/sources
@@ -4719,14 +4719,14 @@

Response examples:

- +
- +
PUT
- + /api/sources
@@ -4763,23 +4763,23 @@

Response examples:

- +
- +
GET
- + /api/verify/{email}/{vcode}
Verify an account

JSON parameters:

- +
- +

Response examples:

@@ -4798,21 +4798,21 @@

Response examples:

- +
- +
DELETE
- + /api/views
Delete a view

JSON parameters:

- +

Input examples:

application/json:
 {}
@@ -4835,23 +4835,23 @@

Response examples:

- +
- +
GET
- + /api/views
Fetches a list of all views (filters) for this user

JSON parameters:

- +
- +

Response examples:

@@ -4883,21 +4883,21 @@

Response examples:

- +
- +
PATCH
- + /api/views
Edit an existing view

JSON parameters:

- +

Input examples:

application/json:
 {}
@@ -4920,14 +4920,14 @@

Response examples:

- +
- +
POST
- + /api/views
@@ -5001,21 +5001,21 @@

Response examples:

- +
- +
PUT
- + /api/views
Add a new view

JSON parameters:

- +

Input examples:

application/json:
 {}
@@ -5038,23 +5038,23 @@

Response examples:

- +
- +
GET
- + /api/widgets/{pageid}
Shows the widget layout for a specific page

JSON parameters:

- +
- +

Response examples:

@@ -5086,5 +5086,5 @@

Response examples:

- + diff --git a/ui/contributors.html b/ui/contributors.html index 086f1d75..b65acce9 100644 --- a/ui/contributors.html +++ b/ui/contributors.html @@ -78,8 +78,8 @@ - - + + @@ -88,4 +88,4 @@ - \ No newline at end of file + diff --git a/ui/css/c3.css b/ui/css/c3.css index be5a9475..fffcd25a 100644 --- a/ui/css/c3.css +++ b/ui/css/c3.css @@ -15,7 +15,7 @@ -webkit-user-select: none; -moz-user-select: none; user-select: none; } - + .c3-legend-item text { font: 12px sans-serif !important; } @@ -113,7 +113,7 @@ z-index: 10; } .c3-tooltip { - + background-color: #fff; empty-cells: show; border-spacing: 0px; @@ -123,7 +123,7 @@ border-radius: 3px; border: 2px solid #333; opacity: 0.9; } - + .c3-tooltip tr { border: 1px solid #CCC; font-family: sans-serif; @@ -276,4 +276,4 @@ .linkedChart { min-height: 600px; -} \ No newline at end of file +} diff --git a/ui/css/daterangepicker.css b/ui/css/daterangepicker.css index dfce19cb..1d643674 100644 --- a/ui/css/daterangepicker.css +++ b/ui/css/daterangepicker.css @@ -229,4 +229,3 @@ float: left; } .daterangepicker .calendar.left { clear: none; } } - diff --git a/ui/css/kibble.min.css b/ui/css/kibble.min.css index 1f046beb..d7673f67 100644 --- a/ui/css/kibble.min.css +++ b/ui/css/kibble.min.css @@ -3100,10 +3100,10 @@ body.error .logo h1 { border: 1.25px solid #3338; line-height: 28px; z-index: 1001; - font: 12px sans-serif; - background: lightgoldenrodyellow; - border-radius: 6px; - pointer-events: none; + font: 12px sans-serif; + background: lightgoldenrodyellow; + border-radius: 6px; + pointer-events: none; } .show-calendar { diff --git a/ui/dashboard.html b/ui/dashboard.html index f93cedbe..0a16ee87 100644 --- a/ui/dashboard.html +++ b/ui/dashboard.html @@ -6,7 +6,7 @@ - + @@ -81,8 +81,8 @@ - - + + @@ -91,4 +91,4 @@ - \ No newline at end of file + diff --git a/ui/engagement.html b/ui/engagement.html index 2fec1b8f..47252a46 100644 --- a/ui/engagement.html +++ b/ui/engagement.html @@ -79,8 +79,8 @@ - - + + @@ -89,4 +89,4 @@ - \ No newline at end of file + diff --git a/ui/index.html b/ui/index.html index 44b17f02..01026f35 100644 --- a/ui/index.html +++ b/ui/index.html @@ -27,8 +27,8 @@ - - + + @@ -42,4 +42,4 @@ Apache Kibble, Kibble, Apache, the Apache feather logo, and the Apache Kibble project logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and other countries. 
- \ No newline at end of file + diff --git a/ui/js/app.js b/ui/js/app.js index 17c4c918..2cb1c5e1 100644 --- a/ui/js/app.js +++ b/ui/js/app.js @@ -1025,14 +1025,14 @@ // $('#editModal #fName').val(val1.html()); // $('#editModal #lName').val(val2.html()); // $('#editModal #uName').val(val3.html()); - // - // + // + // // $('#editModal #sbmtBtn').on('click', function() { // val1.html($('#editModal #fName').val()); // val2.html($('#editModal #lName').val()); // val3.html($('#editModal #uName').val()); // }); - // + // // }); /*----------- END action table CODE -------------------------*/ }; @@ -1082,4 +1082,4 @@ }); }; return Metis; -})(jQuery, Metis); \ No newline at end of file +})(jQuery, Metis); diff --git a/ui/js/c3.min.js b/ui/js/c3.min.js index 15506791..b98c158d 100644 --- a/ui/js/c3.min.js +++ b/ui/js/c3.min.js @@ -1 +1 @@ -!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):t.c3=e()}(this,function(){"use strict";function t(t,e){function i(t,e){t.attr("transform",function(t){return"translate("+Math.ceil(e(t)+w)+", 0)"})}function n(t,e){t.attr("transform",function(t){return"translate(0,"+Math.ceil(e(t))+")"})}function a(t){var e=t[0],i=t[t.length-1];return e0&&n[0]>0&&n.unshift(n[0]-(n[1]-n[0])),n}function s(){var t,i=_.copy();return e.isCategory&&(t=_.domain(),i.domain([t[0],t[1]-1])),i}function c(t){var e=g?g(t):t;return void 0!==e?e:""}function d(t){if(r)return r;var e={h:11.5,w:5.5};return t.select("text").text(c).each(function(t){var i=this.getBoundingClientRect(),n=c(t),a=i.height,r=n?i.width/n.length:void 0;a&&r&&(e.h=a,e.w=r)}).text(""),r=e,e}function l(i){return e.withoutTransition?i:t.transition(i)}function u(r){r.each(function(){function r(t,i){function n(t,e){r=void 0;for(var s=1;s0?1:-1):N}(j)).style("text-anchor",function(t){return t?t>0?"start":"end":"middle"}(j)).attr("transform",function(t){return t?"rotate("+t+")":""}(j)),H.attr("x",0).attr("dy",g).attr("dx",function(t){return t?8*Math.sin(Math.PI*(t/180)):0}(j)),R.attr("d","M"+I[0]+","+h+"V0H"+I[1]+"V"+h);break;case"top":p=i,D.attr("y2",-y),X.attr("y",-N),F.attr("x2",0).attr("y2",-y),k.attr("x",0).attr("y",-N),M.style("text-anchor","middle"),H.attr("x",0).attr("dy","0em"),R.attr("d","M"+I[0]+","+-h+"V0H"+I[1]+"V"+-h);break;case"left":p=n,D.attr("x2",-y),X.attr("x",-N),F.attr("x2",-y).attr("y1",b).attr("y2",b),k.attr("x",-N).attr("y",w),M.style("text-anchor","end"),H.attr("x",-N).attr("dy",g),R.attr("d","M"+-h+","+I[0]+"H0V"+I[1]+"H"+-h);break;case"right":p=n,D.attr("x2",y),X.attr("x",N),F.attr("x2",y).attr("y2",0),k.attr("x",N).attr("y",0),M.style("text-anchor","start"),H.attr("x",N).attr("dy",g),R.attr("d","M"+h+","+I[0]+"H0V"+I[1]+"H"+h)}if(P.rangeBand){var U=P,W=U.rangeBand()/2;T=P=function(t){return U(t)+W}}else T.rangeBand?T=P:G.call(p,P);V.call(p,T),E.call(p,P)})}var h,g,p,f,_=t.scale.linear(),x="bottom",y=6,m=3,S=null,w=0,v=!0;return e=e||{},h=e.withOuterTick?6:0,u.scale=function(t){return arguments.length?(_=t,u):_},u.orient=function(t){return arguments.length?(x=t in{top:1,right:1,bottom:1,left:1}?t+"":"bottom",u):x},u.tickFormat=function(t){return arguments.length?(g=t,u):g},u.tickCentered=function(t){return arguments.length?(f=t,u):f},u.tickOffset=function(){return w},u.tickInterval=function(){var t;return t=e.isCategory?2*w:(u.g.select("path.domain").node().getTotalLength()-2*h)/u.g.selectAll("line").size(),t===1/0?0:t},u.ticks=function(){return arguments.length?(p=arguments,u):p},u.tickCulling=function(t){return 
arguments.length?(v=t,u):v},u.tickValues=function(t){if("function"==typeof t)S=function(){return t(_.domain())};else{if(!arguments.length)return S;S=t}return u},u}function e(t){i.call(this,t)}function i(t){this.owner=t}function n(t){var e=this.internal=new a(this);e.loadConfig(t),e.beforeInit(t),e.init(),e.afterInit(t),function t(e,i,n){Object.keys(e).forEach(function(a){i[a]=e[a].bind(n),Object.keys(e[a]).length>0&&t(e[a],i[a],n)})}(b,this,this)}function a(t){var e=this;e.d3=window.d3?window.d3:"undefined"!=typeof require?require("d3"):void 0,e.api=t,e.config=e.getDefaultConfig(),e.data={},e.cache={},e.axes={}}var r,o={target:"c3-target",chart:"c3-chart",chartLine:"c3-chart-line",chartLines:"c3-chart-lines",chartBar:"c3-chart-bar",chartBars:"c3-chart-bars",chartText:"c3-chart-text",chartTexts:"c3-chart-texts",chartArc:"c3-chart-arc",chartArcs:"c3-chart-arcs",chartArcsTitle:"c3-chart-arcs-title",chartArcsBackground:"c3-chart-arcs-background",chartArcsGaugeUnit:"c3-chart-arcs-gauge-unit",chartArcsGaugeMax:"c3-chart-arcs-gauge-max",chartArcsGaugeMin:"c3-chart-arcs-gauge-min",selectedCircle:"c3-selected-circle",selectedCircles:"c3-selected-circles",eventRect:"c3-event-rect",eventRects:"c3-event-rects",eventRectsSingle:"c3-event-rects-single",eventRectsMultiple:"c3-event-rects-multiple",zoomRect:"c3-zoom-rect",brush:"c3-brush",focused:"c3-focused",defocused:"c3-defocused",region:"c3-region",regions:"c3-regions",title:"c3-title",tooltipContainer:"c3-tooltip-container",tooltip:"c3-tooltip",tooltipName:"c3-tooltip-name",shape:"c3-shape",shapes:"c3-shapes",line:"c3-line",lines:"c3-lines",bar:"c3-bar",bars:"c3-bars",circle:"c3-circle",circles:"c3-circles",arc:"c3-arc",arcs:"c3-arcs",area:"c3-area",areas:"c3-areas",empty:"c3-empty",text:"c3-text",texts:"c3-texts",gaugeValue:"c3-gauge-value",grid:"c3-grid",gridLines:"c3-grid-lines",xgrid:"c3-xgrid",xgrids:"c3-xgrids",xgridLine:"c3-xgrid-line",xgridLines:"c3-xgrid-lines",xgridFocus:"c3-xgrid-focus",ygrid:"c3-ygrid",ygrids:"c3-ygrids",ygridLine:"c3-ygrid-line",ygridLines:"c3-ygrid-lines",axis:"c3-axis",axisX:"c3-axis-x",axisXLabel:"c3-axis-x-label",axisY:"c3-axis-y",axisYLabel:"c3-axis-y-label",axisY2:"c3-axis-y2",axisY2Label:"c3-axis-y2-label",legendBackground:"c3-legend-background",legendItem:"c3-legend-item",legendItemEvent:"c3-legend-item-event",legendItemTile:"c3-legend-item-tile",legendItemHidden:"c3-legend-item-hidden",legendItemFocused:"c3-legend-item-focused",dragarea:"c3-dragarea",EXPANDED:"_expanded_",SELECTED:"_selected_",INCLUDED:"_included_"},s="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},c=function(t){return t||0===t},d=function(t){return"function"==typeof t},l=function(t){return Array.isArray(t)},u=function(t){return"string"==typeof t},h=function(t){return void 0===t},g=function(t){return void 0!==t},p=function(t){return 10*Math.ceil(t/10)},f=function(t){return Math.ceil(t)+.5},_=function(t){return t[1]-t[0]},x=function(t){return void 0===t||null===t||u(t)&&0===t.length||"object"===(void 0===t?"undefined":s(t))&&0===Object.keys(t).length},y=function(t){return!A.isEmpty(t)},m=function(t,e,i){return void 0!==t[e]?t[e]:i},S=function(t,e){var i=!1;return Object.keys(t).forEach(function(n){t[n]===e&&(i=!0)}),i},w=function(t){return"string"==typeof t?t.replace(//g,">"):t},v=function(t){var 
e=t.getBoundingClientRect(),i=[t.pathSegList.getItem(0),t.pathSegList.getItem(1)];return{x:i[0].x,y:Math.min(i[0].y,i[1].y),width:e.width,height:e.height}};!function(t,e){if(Object.create)e.prototype=Object.create(t.prototype);else{var i=function(){};i.prototype=t.prototype,e.prototype=new i}e.prototype.constructor=e}(i,e),e.prototype.init=function(){var t=this.owner,e=t.config,i=t.main;t.axes.x=i.append("g").attr("class",o.axis+" "+o.axisX).attr("clip-path",t.clipPathForXAxis).attr("transform",t.getTranslate("x")).style("visibility",e.axis_x_show?"visible":"hidden"),t.axes.x.append("text").attr("class",o.axisXLabel).attr("transform",e.axis_rotated?"rotate(-90)":"").style("text-anchor",this.textAnchorForXAxisLabel.bind(this)),t.axes.y=i.append("g").attr("class",o.axis+" "+o.axisY).attr("clip-path",e.axis_y_inner?"":t.clipPathForYAxis).attr("transform",t.getTranslate("y")).style("visibility",e.axis_y_show?"visible":"hidden"),t.axes.y.append("text").attr("class",o.axisYLabel).attr("transform",e.axis_rotated?"":"rotate(-90)").style("text-anchor",this.textAnchorForYAxisLabel.bind(this)),t.axes.y2=i.append("g").attr("class",o.axis+" "+o.axisY2).attr("transform",t.getTranslate("y2")).style("visibility",e.axis_y2_show?"visible":"hidden"),t.axes.y2.append("text").attr("class",o.axisY2Label).attr("transform",e.axis_rotated?"":"rotate(-90)").style("text-anchor",this.textAnchorForY2AxisLabel.bind(this))},e.prototype.getXAxis=function(e,i,n,a,r,o,s){var c=this.owner,d=c.config,l={isCategory:c.isCategorized(),withOuterTick:r,tickMultiline:d.axis_x_tick_multiline,tickWidth:d.axis_x_tick_width,tickTextRotate:s?0:d.axis_x_tick_rotate,withoutTransition:o},u=t(c.d3,l).scale(e).orient(i);return c.isTimeSeries()&&a&&"function"!=typeof a&&(a=a.map(function(t){return c.parseDate(t)})),u.tickFormat(n).tickValues(a),c.isCategorized()&&(u.tickCentered(d.axis_x_tick_centered),x(d.axis_x_tick_culling)&&(d.axis_x_tick_culling=!1)),u},e.prototype.updateXAxisTickValues=function(t,e){var i,n=this.owner,a=n.config;return(a.axis_x_tick_fit||a.axis_x_tick_count)&&(i=this.generateTickValues(n.mapTargetsToUniqueXs(t),a.axis_x_tick_count,n.isTimeSeries())),e?e.tickValues(i):(n.xAxis.tickValues(i),n.subXAxis.tickValues(i)),i},e.prototype.getYAxis=function(e,i,n,a,r,o,s){var c=this.owner,d=c.config,l={withOuterTick:r,withoutTransition:o,tickTextRotate:s?0:d.axis_y_tick_rotate},u=t(c.d3,l).scale(e).orient(i).tickFormat(n);return c.isTimeSeriesY()?u.ticks(c.d3.time[d.axis_y_tick_time_value],d.axis_y_tick_time_interval):u.tickValues(a),u},e.prototype.getId=function(t){var e=this.owner.config;return t in e.data_axes?e.data_axes[t]:"y"},e.prototype.getXAxisTickFormat=function(){var t=this.owner,e=t.config,i=t.isTimeSeries()?t.defaultAxisTimeFormat:t.isCategorized()?t.categoryName:function(t){return t<0?t.toFixed(0):t};return e.axis_x_tick_format&&(d(e.axis_x_tick_format)?i=e.axis_x_tick_format:t.isTimeSeries()&&(i=function(i){return i?t.axisTimeFormat(e.axis_x_tick_format)(i):""})),d(i)?function(e){return i.call(t,e)}:i},e.prototype.getTickValues=function(t,e){return t||(e?e.tickValues():void 0)},e.prototype.getXAxisTickValues=function(){return this.getTickValues(this.owner.config.axis_x_tick_values,this.owner.xAxis)},e.prototype.getYAxisTickValues=function(){return this.getTickValues(this.owner.config.axis_y_tick_values,this.owner.yAxis)},e.prototype.getY2AxisTickValues=function(){return this.getTickValues(this.owner.config.axis_y2_tick_values,this.owner.y2Axis)},e.prototype.getLabelOptionByAxisId=function(t){var 
e,i=this.owner.config;return"y"===t?e=i.axis_y_label:"y2"===t?e=i.axis_y2_label:"x"===t&&(e=i.axis_x_label),e},e.prototype.getLabelText=function(t){var e=this.getLabelOptionByAxisId(t);return u(e)?e:e?e.text:null},e.prototype.setLabelText=function(t,e){var i=this.owner.config,n=this.getLabelOptionByAxisId(t);u(n)?"y"===t?i.axis_y_label=e:"y2"===t?i.axis_y2_label=e:"x"===t&&(i.axis_x_label=e):n&&(n.text=e)},e.prototype.getLabelPosition=function(t,e){var i=this.getLabelOptionByAxisId(t),n=i&&"object"===(void 0===i?"undefined":s(i))&&i.position?i.position:e;return{isInner:n.indexOf("inner")>=0,isOuter:n.indexOf("outer")>=0,isLeft:n.indexOf("left")>=0,isCenter:n.indexOf("center")>=0,isRight:n.indexOf("right")>=0,isTop:n.indexOf("top")>=0,isMiddle:n.indexOf("middle")>=0,isBottom:n.indexOf("bottom")>=0}},e.prototype.getXAxisLabelPosition=function(){return this.getLabelPosition("x",this.owner.config.axis_rotated?"inner-top":"inner-right")},e.prototype.getYAxisLabelPosition=function(){return this.getLabelPosition("y",this.owner.config.axis_rotated?"inner-right":"inner-top")},e.prototype.getY2AxisLabelPosition=function(){return this.getLabelPosition("y2",this.owner.config.axis_rotated?"inner-right":"inner-top")},e.prototype.getLabelPositionById=function(t){return"y2"===t?this.getY2AxisLabelPosition():"y"===t?this.getYAxisLabelPosition():this.getXAxisLabelPosition()},e.prototype.textForXAxisLabel=function(){return this.getLabelText("x")},e.prototype.textForYAxisLabel=function(){return this.getLabelText("y")},e.prototype.textForY2AxisLabel=function(){return this.getLabelText("y2")},e.prototype.xForAxisLabel=function(t,e){var i=this.owner;return t?e.isLeft?0:e.isCenter?i.width/2:i.width:e.isBottom?-i.height:e.isMiddle?-i.height/2:0},e.prototype.dxForAxisLabel=function(t,e){return t?e.isLeft?"0.5em":e.isRight?"-0.5em":"0":e.isTop?"-0.5em":e.isBottom?"0.5em":"0"},e.prototype.textAnchorForAxisLabel=function(t,e){return t?e.isLeft?"start":e.isCenter?"middle":"end":e.isBottom?"start":e.isMiddle?"middle":"end"},e.prototype.xForXAxisLabel=function(){return this.xForAxisLabel(!this.owner.config.axis_rotated,this.getXAxisLabelPosition())},e.prototype.xForYAxisLabel=function(){return this.xForAxisLabel(this.owner.config.axis_rotated,this.getYAxisLabelPosition())},e.prototype.xForY2AxisLabel=function(){return this.xForAxisLabel(this.owner.config.axis_rotated,this.getY2AxisLabelPosition())},e.prototype.dxForXAxisLabel=function(){return this.dxForAxisLabel(!this.owner.config.axis_rotated,this.getXAxisLabelPosition())},e.prototype.dxForYAxisLabel=function(){return this.dxForAxisLabel(this.owner.config.axis_rotated,this.getYAxisLabelPosition())},e.prototype.dxForY2AxisLabel=function(){return this.dxForAxisLabel(this.owner.config.axis_rotated,this.getY2AxisLabelPosition())},e.prototype.dyForXAxisLabel=function(){var t=this.owner.config,e=this.getXAxisLabelPosition();return t.axis_rotated?e.isInner?"1.2em":-25-this.getMaxTickWidth("x"):e.isInner?"-0.5em":t.axis_x_height?t.axis_x_height-10:"3em"},e.prototype.dyForYAxisLabel=function(){var t=this.owner,e=this.getYAxisLabelPosition();return t.config.axis_rotated?e.isInner?"-0.5em":"3em":e.isInner?"1.2em":-10-(t.config.axis_y_inner?0:this.getMaxTickWidth("y")+10)},e.prototype.dyForY2AxisLabel=function(){var t=this.owner,e=this.getY2AxisLabelPosition();return t.config.axis_rotated?e.isInner?"1.2em":"-2.2em":e.isInner?"-0.5em":15+(t.config.axis_y2_inner?0:this.getMaxTickWidth("y2")+15)},e.prototype.textAnchorForXAxisLabel=function(){var t=this.owner;return 
this.textAnchorForAxisLabel(!t.config.axis_rotated,this.getXAxisLabelPosition())},e.prototype.textAnchorForYAxisLabel=function(){var t=this.owner;return this.textAnchorForAxisLabel(t.config.axis_rotated,this.getYAxisLabelPosition())},e.prototype.textAnchorForY2AxisLabel=function(){var t=this.owner;return this.textAnchorForAxisLabel(t.config.axis_rotated,this.getY2AxisLabelPosition())},e.prototype.getMaxTickWidth=function(t,e){var i,n,a,r,o=this.owner,s=o.config,c=0;return e&&o.currentMaxTickWidths[t]?o.currentMaxTickWidths[t]:(o.svg&&(i=o.filterTargetsToShow(o.data.targets),"y"===t?(n=o.y.copy().domain(o.getYDomain(i,"y")),a=this.getYAxis(n,o.yOrient,s.axis_y_tick_format,o.yAxisTickValues,!1,!0,!0)):"y2"===t?(n=o.y2.copy().domain(o.getYDomain(i,"y2")),a=this.getYAxis(n,o.y2Orient,s.axis_y2_tick_format,o.y2AxisTickValues,!1,!0,!0)):(n=o.x.copy().domain(o.getXDomain(i)),a=this.getXAxis(n,o.xOrient,o.xAxisTickFormat,o.xAxisTickValues,!1,!0,!0),this.updateXAxisTickValues(i,a)),(r=o.d3.select("body").append("div").classed("c3",!0)).append("svg").style("visibility","hidden").style("position","fixed").style("top",0).style("left",0).append("g").call(a).each(function(){o.d3.select(this).selectAll("text").each(function(){var t=this.getBoundingClientRect();c2){for(o=n-2,a=t[0],s=((r=t[t.length-1])-a)/(o+1),u=[a],c=0;c=0&&D.select(this).style("display",e%V?"none":"block")})}else O.svg.selectAll("."+o.axisX+" .tick text").style("display","block");_=O.generateDrawArea?O.generateDrawArea(X,!1):void 0,x=O.generateDrawBar?O.generateDrawBar(k):void 0,y=O.generateDrawLine?O.generateDrawLine(M,!1):void 0,S=O.generateXYForText(X,k,M,!0),w=O.generateXYForText(X,k,M,!1),i&&(O.subY.domain(O.getYDomain(z,"y")),O.subY2.domain(O.getYDomain(z,"y2"))),O.updateXgridFocus(),R.select("text."+o.text+"."+o.empty).attr("x",O.width/2).attr("y",O.height/2).text(F.data_empty_label_text).transition().style("opacity",z.length?0:1),O.updateGrid(v),O.updateRegion(v),O.updateBar(b),O.updateLine(b),O.updateArea(b),O.updateCircle(),O.hasDataLabel()&&O.updateText(b),O.redrawTitle&&O.redrawTitle(),O.redrawArc&&O.redrawArc(v,b,c),O.redrawSubchart&&O.redrawSubchart(n,e,v,b,X,k,M),R.selectAll("."+o.selectedCircles).filter(O.isBarType.bind(O)).selectAll("circle").remove(),F.interaction_enabled&&!t.flow&&g&&(O.redrawEventRect(),O.updateZoom&&O.updateZoom()),O.updateCircleY(),E=(O.config.axis_rotated?O.circleY:O.circleX).bind(O),I=(O.config.axis_rotated?O.circleX:O.circleY).bind(O),t.flow&&(P=O.generateFlow({targets:z,flow:t.flow,duration:t.flow.duration,drawBar:x,drawLine:y,drawArea:_,cx:E,cy:I,xv:B,xForText:S,yForText:w})),(v||P)&&O.isTabVisible()?D.transition().duration(v).each(function(){var e=[];[O.redrawBar(x,!0),O.redrawLine(y,!0),O.redrawArea(_,!0),O.redrawCircle(E,I,!0),O.redrawText(S,w,t.flow,!0),O.redrawRegion(!0),O.redrawGrid(!0)].forEach(function(t){t.forEach(function(t){e.push(t)})}),T=O.generateWait(),e.forEach(function(t){T.add(t)})}).call(T,function(){P&&P(),F.onrendered&&F.onrendered.call(O)}):(O.redrawBar(x),O.redrawLine(y),O.redrawArea(_),O.redrawCircle(E,I),O.redrawText(S,w,t.flow),O.redrawRegion(),O.redrawGrid(),F.onrendered&&F.onrendered.call(O)),O.mapToIds(O.data.targets).forEach(function(t){O.withoutFadeIn[t]=!0})},A.updateAndRedraw=function(t){var 
e,i=this,n=i.config;(t=t||{}).withTransition=m(t,"withTransition",!0),t.withTransform=m(t,"withTransform",!1),t.withLegend=m(t,"withLegend",!1),t.withUpdateXDomain=!0,t.withUpdateOrgXDomain=!0,t.withTransitionForExit=!1,t.withTransitionForTransform=m(t,"withTransitionForTransform",t.withTransition),i.updateSizes(),t.withLegend&&n.legend_show||(e=i.axis.generateTransitions(t.withTransitionForAxis?n.transition_duration:0),i.updateScales(),i.updateSvgSize(),i.transformAll(t.withTransitionForTransform,e)),i.redraw(t,e)},A.redrawWithoutRescale=function(){this.redraw({withY:!1,withSubchart:!1,withEventRect:!1,withTransitionForAxis:!1})},A.isTimeSeries=function(){return"timeseries"===this.config.axis_x_type},A.isCategorized=function(){return this.config.axis_x_type.indexOf("categor")>=0},A.isCustomX=function(){var t=this,e=t.config;return!t.isTimeSeries()&&(e.data_x||y(e.data_xs))},A.isTimeSeriesY=function(){return"timeseries"===this.config.axis_y_type},A.getTranslate=function(t){var e,i,n=this,a=n.config;return"main"===t?(e=f(n.margin.left),i=f(n.margin.top)):"context"===t?(e=f(n.margin2.left),i=f(n.margin2.top)):"legend"===t?(e=n.margin3.left,i=n.margin3.top):"x"===t?(e=0,i=a.axis_rotated?0:n.height):"y"===t?(e=0,i=a.axis_rotated?n.height:0):"y2"===t?(e=a.axis_rotated?0:n.width,i=a.axis_rotated?1:0):"subx"===t?(e=0,i=a.axis_rotated?0:n.height2):"arc"===t&&(e=n.arcWidth/2,i=n.arcHeight/2),"translate("+e+","+i+")"},A.initialOpacity=function(t){return null!==t.value&&this.withoutFadeIn[t.id]?1:0},A.initialOpacityForCircle=function(t){return null!==t.value&&this.withoutFadeIn[t.id]?this.opacityForCircle(t):0},A.opacityForCircle=function(t){var e=(d(this.config.point_show)?this.config.point_show(t):this.config.point_show)?1:0;return c(t.value)?this.isScatterType(t)?.5:e:0},A.opacityForText=function(){return this.hasDataLabel()?1:0},A.xx=function(t){return t?this.x(t.x):null},A.xv=function(t){var e=this,i=t.value;return e.isTimeSeries()?i=e.parseDate(t.value):e.isCategorized()&&"string"==typeof t.value&&(i=e.config.axis_x_categories.indexOf(t.value)),Math.ceil(e.x(i))},A.yv=function(t){var e=this,i=t.axis&&"y2"===t.axis?e.y2:e.y;return Math.ceil(i(t.value))},A.subxx=function(t){return t?this.subX(t.x):null},A.transformMain=function(t,e){var i,n,a,r=this;e&&e.axisX?i=e.axisX:(i=r.main.select("."+o.axisX),t&&(i=i.transition())),e&&e.axisY?n=e.axisY:(n=r.main.select("."+o.axisY),t&&(n=n.transition())),e&&e.axisY2?a=e.axisY2:(a=r.main.select("."+o.axisY2),t&&(a=a.transition())),(t?r.main.transition():r.main).attr("transform",r.getTranslate("main")),i.attr("transform",r.getTranslate("x")),n.attr("transform",r.getTranslate("y")),a.attr("transform",r.getTranslate("y2")),r.main.select("."+o.chartArcs).attr("transform",r.getTranslate("arc"))},A.transformAll=function(t,e){var i=this;i.transformMain(t,e),i.config.subchart_show&&i.transformContext(t,e),i.legend&&i.transformLegend(t)},A.updateSvgSize=function(){var t=this,e=t.svg.select(".c3-brush 
.background");t.svg.attr("width",t.currentWidth).attr("height",t.currentHeight),t.svg.selectAll(["#"+t.clipId,"#"+t.clipIdForGrid]).select("rect").attr("width",t.width).attr("height",t.height),t.svg.select("#"+t.clipIdForXAxis).select("rect").attr("x",t.getXAxisClipX.bind(t)).attr("y",t.getXAxisClipY.bind(t)).attr("width",t.getXAxisClipWidth.bind(t)).attr("height",t.getXAxisClipHeight.bind(t)),t.svg.select("#"+t.clipIdForYAxis).select("rect").attr("x",t.getYAxisClipX.bind(t)).attr("y",t.getYAxisClipY.bind(t)).attr("width",t.getYAxisClipWidth.bind(t)).attr("height",t.getYAxisClipHeight.bind(t)),t.svg.select("#"+t.clipIdForSubchart).select("rect").attr("width",t.width).attr("height",e.size()?e.attr("height"):0),t.svg.select("."+o.zoomRect).attr("width",t.width).attr("height",t.height),t.selectChart.style("max-height",t.currentHeight+"px")},A.updateDimension=function(t){var e=this;t||(e.config.axis_rotated?(e.axes.x.call(e.xAxis),e.axes.subx.call(e.subXAxis)):(e.axes.y.call(e.yAxis),e.axes.y2.call(e.y2Axis))),e.updateSizes(),e.updateScales(),e.updateSvgSize(),e.transformAll(!1)},A.observeInserted=function(t){var e,i=this;"undefined"!=typeof MutationObserver?(e=new MutationObserver(function(n){n.forEach(function(n){"childList"===n.type&&n.previousSibling&&(e.disconnect(),i.intervalForObserveInserted=window.setInterval(function(){t.node().parentNode&&(window.clearInterval(i.intervalForObserveInserted),i.updateDimension(),i.brush&&i.brush.update(),i.config.oninit.call(i),i.redraw({withTransform:!0,withUpdateXDomain:!0,withUpdateOrgXDomain:!0,withTransition:!1,withTransitionForTransform:!1,withLegend:!0}),t.transition().style("opacity",1))},10))})})).observe(t.node(),{attributes:!0,childList:!0,characterData:!0}):window.console.error("MutationObserver not defined.")},A.bindResize=function(){var t=this,e=t.config;if(t.resizeFunction=t.generateResize(),t.resizeFunction.add(function(){e.onresize.call(t)}),e.resize_auto&&t.resizeFunction.add(function(){void 0!==t.resizeTimeout&&window.clearTimeout(t.resizeTimeout),t.resizeTimeout=window.setTimeout(function(){delete t.resizeTimeout,t.api.flush()},100)}),t.resizeFunction.add(function(){e.onresized.call(t)}),window.attachEvent)window.attachEvent("onresize",t.resizeFunction);else if(window.addEventListener)window.addEventListener("resize",t.resizeFunction,!1);else{var i=window.onresize;i?i.add&&i.remove||(i=t.generateResize()).add(window.onresize):i=t.generateResize(),i.add(t.resizeFunction),window.onresize=i}},A.generateResize=function(){function t(){e.forEach(function(t){t()})}var e=[];return t.add=function(t){e.push(t)},t.remove=function(t){for(var i=0;ie.getTotalLength())break;i--}while(i>0);return i})),"SVGPathSegList"in window||(window.SVGPathSegList=function(t){this._pathElement=t,this._list=this._parsePath(this._pathElement.getAttribute("d")),this._mutationObserverConfig={attributes:!0,attributeFilter:["d"]},this._pathElementMutationObserver=new MutationObserver(this._updateListFromPathMutations.bind(this)),this._pathElementMutationObserver.observe(this._pathElement,this._mutationObserverConfig)},window.SVGPathSegList.prototype.classname="SVGPathSegList",Object.defineProperty(window.SVGPathSegList.prototype,"numberOfItems",{get:function(){return this._checkPathSynchronizedToList(),this._list.length},enumerable:!0}),Object.defineProperty(window.SVGPathElement.prototype,"pathSegList",{get:function(){return this._pathSegList||(this._pathSegList=new 
window.SVGPathSegList(this)),this._pathSegList},enumerable:!0}),Object.defineProperty(window.SVGPathElement.prototype,"normalizedPathSegList",{get:function(){return this.pathSegList},enumerable:!0}),Object.defineProperty(window.SVGPathElement.prototype,"animatedPathSegList",{get:function(){return this.pathSegList},enumerable:!0}),Object.defineProperty(window.SVGPathElement.prototype,"animatedNormalizedPathSegList",{get:function(){return this.pathSegList},enumerable:!0}),window.SVGPathSegList.prototype._checkPathSynchronizedToList=function(){this._updateListFromPathMutations(this._pathElementMutationObserver.takeRecords())},window.SVGPathSegList.prototype._updateListFromPathMutations=function(t){if(this._pathElement){var e=!1;t.forEach(function(t){"d"==t.attributeName&&(e=!0)}),e&&(this._list=this._parsePath(this._pathElement.getAttribute("d")))}},window.SVGPathSegList.prototype._writeListToPath=function(){this._pathElementMutationObserver.disconnect(),this._pathElement.setAttribute("d",window.SVGPathSegList._pathSegArrayAsString(this._list)),this._pathElementMutationObserver.observe(this._pathElement,this._mutationObserverConfig)},window.SVGPathSegList.prototype.segmentChanged=function(t){this._writeListToPath()},window.SVGPathSegList.prototype.clear=function(){this._checkPathSynchronizedToList(),this._list.forEach(function(t){t._owningPathSegList=null}),this._list=[],this._writeListToPath()},window.SVGPathSegList.prototype.initialize=function(t){return this._checkPathSynchronizedToList(),this._list=[t],t._owningPathSegList=this,this._writeListToPath(),t},window.SVGPathSegList.prototype._checkValidIndex=function(t){if(isNaN(t)||t<0||t>=this.numberOfItems)throw"INDEX_SIZE_ERR"},window.SVGPathSegList.prototype.getItem=function(t){return this._checkPathSynchronizedToList(),this._checkValidIndex(t),this._list[t]},window.SVGPathSegList.prototype.insertItemBefore=function(t,e){return this._checkPathSynchronizedToList(),e>this.numberOfItems&&(e=this.numberOfItems),t._owningPathSegList&&(t=t.clone()),this._list.splice(e,0,t),t._owningPathSegList=this,this._writeListToPath(),t},window.SVGPathSegList.prototype.replaceItem=function(t,e){return this._checkPathSynchronizedToList(),t._owningPathSegList&&(t=t.clone()),this._checkValidIndex(e),this._list[e]=t,t._owningPathSegList=this,this._writeListToPath(),t},window.SVGPathSegList.prototype.removeItem=function(t){this._checkPathSynchronizedToList(),this._checkValidIndex(t);var e=this._list[t];return this._list.splice(t,1),this._writeListToPath(),e},window.SVGPathSegList.prototype.appendItem=function(t){return this._checkPathSynchronizedToList(),t._owningPathSegList&&(t=t.clone()),this._list.push(t),t._owningPathSegList=this,this._writeListToPath(),t},window.SVGPathSegList._pathSegArrayAsString=function(t){var e="",i=!0;return t.forEach(function(t){i?(i=!1,e+=t._asPathString()):e+=" "+t._asPathString()}),e},window.SVGPathSegList.prototype._parsePath=function(t){if(!t||0==t.length)return[];var e=this,i=function(){this.pathSegList=[]};i.prototype.appendSegment=function(t){this.pathSegList.push(t)};var n=function(t){this._string=t,this._currentIndex=0,this._endIndex=this._string.length,this._previousCommand=window.SVGPathSeg.PATHSEG_UNKNOWN,this._skipOptionalSpaces()};n.prototype._isCurrentSpace=function(){var t=this._string[this._currentIndex];return t<=" "&&(" 
"==t||"\n"==t||"\t"==t||"\r"==t||"\f"==t)},n.prototype._skipOptionalSpaces=function(){for(;this._currentIndex="0"&&t<="9")&&e!=window.SVGPathSeg.PATHSEG_CLOSEPATH?e==window.SVGPathSeg.PATHSEG_MOVETO_ABS?window.SVGPathSeg.PATHSEG_LINETO_ABS:e==window.SVGPathSeg.PATHSEG_MOVETO_REL?window.SVGPathSeg.PATHSEG_LINETO_REL:e:window.SVGPathSeg.PATHSEG_UNKNOWN},n.prototype.initialCommandIsMoveTo=function(){if(!this.hasMoreData())return!0;var t=this.peekSegmentType();return t==window.SVGPathSeg.PATHSEG_MOVETO_ABS||t==window.SVGPathSeg.PATHSEG_MOVETO_REL},n.prototype._parseNumber=function(){var t=0,e=0,i=1,n=0,a=1,r=1,o=this._currentIndex;if(this._skipOptionalSpaces(),this._currentIndex"9")&&"."!=this._string.charAt(this._currentIndex))){for(var s=this._currentIndex;this._currentIndex="0"&&this._string.charAt(this._currentIndex)<="9";)this._currentIndex++;if(this._currentIndex!=s)for(var c=this._currentIndex-1,d=1;c>=s;)e+=d*(this._string.charAt(c--)-"0"),d*=10;if(this._currentIndex=this._endIndex||this._string.charAt(this._currentIndex)<"0"||this._string.charAt(this._currentIndex)>"9")return;for(;this._currentIndex="0"&&this._string.charAt(this._currentIndex)<="9";)i*=10,n+=(this._string.charAt(this._currentIndex)-"0")/i,this._currentIndex+=1}if(this._currentIndex!=o&&this._currentIndex+1=this._endIndex||this._string.charAt(this._currentIndex)<"0"||this._string.charAt(this._currentIndex)>"9")return;for(;this._currentIndex="0"&&this._string.charAt(this._currentIndex)<="9";)t*=10,t+=this._string.charAt(this._currentIndex)-"0",this._currentIndex++}var l=e+n;if(l*=a,t&&(l*=Math.pow(10,r*t)),o!=this._currentIndex)return this._skipOptionalSpacesOrDelimiter(),l}},n.prototype._parseArcFlag=function(){if(!(this._currentIndex>=this._endIndex)){var t=!1,e=this._string.charAt(this._currentIndex++);if("0"==e)t=!1;else{if("1"!=e)return;t=!0}return this._skipOptionalSpacesOrDelimiter(),t}},n.prototype.parseSegment=function(){var t=this._string[this._currentIndex],i=this._pathSegTypeFromChar(t);if(i==window.SVGPathSeg.PATHSEG_UNKNOWN){if(this._previousCommand==window.SVGPathSeg.PATHSEG_UNKNOWN)return null;if((i=this._nextCommandHelper(t,this._previousCommand))==window.SVGPathSeg.PATHSEG_UNKNOWN)return null}else this._currentIndex++;switch(this._previousCommand=i,i){case window.SVGPathSeg.PATHSEG_MOVETO_REL:return new window.SVGPathSegMovetoRel(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_MOVETO_ABS:return new window.SVGPathSegMovetoAbs(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_REL:return new window.SVGPathSegLinetoRel(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_ABS:return new window.SVGPathSegLinetoAbs(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_HORIZONTAL_REL:return new window.SVGPathSegLinetoHorizontalRel(e,this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_HORIZONTAL_ABS:return new window.SVGPathSegLinetoHorizontalAbs(e,this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_VERTICAL_REL:return new window.SVGPathSegLinetoVerticalRel(e,this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_VERTICAL_ABS:return new window.SVGPathSegLinetoVerticalAbs(e,this._parseNumber());case window.SVGPathSeg.PATHSEG_CLOSEPATH:return this._skipOptionalSpaces(),new window.SVGPathSegClosePath(e);case window.SVGPathSeg.PATHSEG_CURVETO_CUBIC_REL:return 
n={x1:this._parseNumber(),y1:this._parseNumber(),x2:this._parseNumber(),y2:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoCubicRel(e,n.x,n.y,n.x1,n.y1,n.x2,n.y2);case window.SVGPathSeg.PATHSEG_CURVETO_CUBIC_ABS:return n={x1:this._parseNumber(),y1:this._parseNumber(),x2:this._parseNumber(),y2:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoCubicAbs(e,n.x,n.y,n.x1,n.y1,n.x2,n.y2);case window.SVGPathSeg.PATHSEG_CURVETO_CUBIC_SMOOTH_REL:return n={x2:this._parseNumber(),y2:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoCubicSmoothRel(e,n.x,n.y,n.x2,n.y2);case window.SVGPathSeg.PATHSEG_CURVETO_CUBIC_SMOOTH_ABS:return n={x2:this._parseNumber(),y2:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoCubicSmoothAbs(e,n.x,n.y,n.x2,n.y2);case window.SVGPathSeg.PATHSEG_CURVETO_QUADRATIC_REL:return n={x1:this._parseNumber(),y1:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoQuadraticRel(e,n.x,n.y,n.x1,n.y1);case window.SVGPathSeg.PATHSEG_CURVETO_QUADRATIC_ABS:return n={x1:this._parseNumber(),y1:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoQuadraticAbs(e,n.x,n.y,n.x1,n.y1);case window.SVGPathSeg.PATHSEG_CURVETO_QUADRATIC_SMOOTH_REL:return new window.SVGPathSegCurvetoQuadraticSmoothRel(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_CURVETO_QUADRATIC_SMOOTH_ABS:return new window.SVGPathSegCurvetoQuadraticSmoothAbs(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_ARC_REL:return n={x1:this._parseNumber(),y1:this._parseNumber(),arcAngle:this._parseNumber(),arcLarge:this._parseArcFlag(),arcSweep:this._parseArcFlag(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegArcRel(e,n.x,n.y,n.x1,n.y1,n.arcAngle,n.arcLarge,n.arcSweep);case window.SVGPathSeg.PATHSEG_ARC_ABS:var n={x1:this._parseNumber(),y1:this._parseNumber(),arcAngle:this._parseNumber(),arcLarge:this._parseArcFlag(),arcSweep:this._parseArcFlag(),x:this._parseNumber(),y:this._parseNumber()};return new window.SVGPathSegArcAbs(e,n.x,n.y,n.x1,n.y1,n.arcAngle,n.arcLarge,n.arcSweep);default:throw"Unknown path seg type."}};var a=new i,r=new n(t);if(!r.initialCommandIsMoveTo())return[];for(;r.hasMoreData();){var o=r.parseSegment();if(!o)return[];a.appendSegment(o)}return a.pathSegList}),b.axis=function(){},b.axis.labels=function(t){var e=this.internal;arguments.length&&(Object.keys(t).forEach(function(i){e.axis.setLabelText(i,t[i])}),e.axis.updateLabels())},b.axis.max=function(t){var e=this.internal,i=e.config;if(!arguments.length)return{x:i.axis_x_max,y:i.axis_y_max,y2:i.axis_y2_max};"object"===(void 0===t?"undefined":s(t))?(c(t.x)&&(i.axis_x_max=t.x),c(t.y)&&(i.axis_y_max=t.y),c(t.y2)&&(i.axis_y2_max=t.y2)):i.axis_y_max=i.axis_y2_max=t,e.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0})},b.axis.min=function(t){var e=this.internal,i=e.config;if(!arguments.length)return{x:i.axis_x_min,y:i.axis_y_min,y2:i.axis_y2_min};"object"===(void 0===t?"undefined":s(t))?(c(t.x)&&(i.axis_x_min=t.x),c(t.y)&&(i.axis_y_min=t.y),c(t.y2)&&(i.axis_y2_min=t.y2)):i.axis_y_min=i.axis_y2_min=t,e.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0})},b.axis.range=function(t){if(!arguments.length)return{max:this.axis.max(),min:this.axis.min()};void 0!==t.max&&this.axis.max(t.max),void 0!==t.min&&this.axis.min(t.min)},b.category=function(t,e){var 
i=this.internal,n=i.config;return arguments.length>1&&(n.axis_x_categories[t]=e,i.redraw()),n.axis_x_categories[t]},b.categories=function(t){var e=this.internal,i=e.config;return arguments.length?(i.axis_x_categories=t,e.redraw(),i.axis_x_categories):i.axis_x_categories},b.resize=function(t){var e=this.internal.config;e.size_width=t?t.width:null,e.size_height=t?t.height:null,this.flush()},b.flush=function(){this.internal.updateAndRedraw({withLegend:!0,withTransition:!1,withTransitionForTransform:!1})},b.destroy=function(){var t=this.internal;if(window.clearInterval(t.intervalForObserveInserted),void 0!==t.resizeTimeout&&window.clearTimeout(t.resizeTimeout),window.detachEvent)window.detachEvent("onresize",t.resizeFunction);else if(window.removeEventListener)window.removeEventListener("resize",t.resizeFunction);else{var e=window.onresize;e&&e.add&&e.remove&&e.remove(t.resizeFunction)}return t.selectChart.classed("c3",!1).html(""),Object.keys(t).forEach(function(e){t[e]=null}),null},b.color=function(t){return this.internal.color(t)},b.data=function(t){var e=this.internal.data.targets;return void 0===t?e:e.filter(function(e){return[].concat(t).indexOf(e.id)>=0})},b.data.shown=function(t){return this.internal.filterTargetsToShow(this.data(t))},b.data.values=function(t){var e,i=null;return t&&(i=(e=this.data(t))[0]?e[0].values.map(function(t){return t.value}):null),i},b.data.names=function(t){return this.internal.clearLegendItemTextBoxCache(),this.internal.updateDataAttributes("names",t)},b.data.colors=function(t){return this.internal.updateDataAttributes("colors",t)},b.data.axes=function(t){return this.internal.updateDataAttributes("axes",t)},b.flow=function(t){var e,i,n,a,r,o,s,d=this.internal,l=[],u=d.getMaxDataCount(),h=0,g=0;if(t.json)i=d.convertJsonToData(t.json,t.keys);else if(t.rows)i=d.convertRowsToData(t.rows);else{if(!t.columns)return;i=d.convertColumnsToData(t.columns)}e=d.convertDataToTargets(i,!0),d.data.targets.forEach(function(t){var i,n,a=!1;for(i=0;i1?a.values[a.values.length-1].x-r.x:r.x-d.getXDomain(d.data.targets)[0]:1,n=[r.x-o,r.x],d.updateXDomain(null,!0,!0,!1,n)),d.updateTargets(d.data.targets),d.redraw({flow:{index:r.index,length:h,duration:c(t.duration)?t.duration:d.config.transition_duration,done:t.done,orgDataCount:u},withLegend:!0,withTransition:u>1,withTrimXDomain:!1,withUpdateXAxis:!0})},A.generateFlow=function(t){var e=this,i=e.config,n=e.d3;return function(){var a,r,s,c=t.targets,d=t.flow,l=t.drawBar,u=t.drawLine,h=t.drawArea,g=t.cx,p=t.cy,f=t.xv,x=t.xForText,y=t.yForText,m=t.duration,S=1,w=d.index,v=d.length,b=e.getValueOnIndex(e.data.targets[0].values,w),A=e.getValueOnIndex(e.data.targets[0].values,w+v),T=e.x.domain(),P=d.duration||m,L=d.done||function(){},C=e.generateWait(),V=e.xgrid||n.selectAll([]),G=e.xgridLines||n.selectAll([]),E=e.mainRegion||n.selectAll([]),I=e.mainText||n.selectAll([]),O=e.mainBar||n.selectAll([]),R=e.mainLine||n.selectAll([]),D=e.mainArea||n.selectAll([]),F=e.mainCircle||n.selectAll([]);e.flowing=!0,e.data.targets.forEach(function(t){t.values.splice(0,v)}),s=e.updateXDomain(c,!0,!0),e.updateXGrid&&e.updateXGrid(!0),d.orgDataCount?a=1===d.orgDataCount||(b&&b.x)===(A&&A.x)?e.x(T[0])-e.x(s[0]):e.isTimeSeries()?e.x(T[0])-e.x(s[0]):e.x(b.x)-e.x(A.x):1!==e.data.targets[0].values.length?a=e.x(T[0])-e.x(s[0]):e.isTimeSeries()?(b=e.getValueOnIndex(e.data.targets[0].values,0),A=e.getValueOnIndex(e.data.targets[0].values,e.data.targets[0].values.length-1),a=e.x(b.x)-e.x(A.x)):a=_(s)/2,S=_(T)/_(s),r="translate("+a+",0) 
scale("+S+",1)",e.hideXGridFocus(),n.transition().ease("linear").duration(P).each(function(){C.add(e.axes.x.transition().call(e.xAxis)),C.add(O.transition().attr("transform",r)),C.add(R.transition().attr("transform",r)),C.add(D.transition().attr("transform",r)),C.add(F.transition().attr("transform",r)),C.add(I.transition().attr("transform",r)),C.add(E.filter(e.isRegionOnX).transition().attr("transform",r)),C.add(V.transition().attr("transform",r)),C.add(G.transition().attr("transform",r))}).call(C,function(){var t,n=[],a=[],r=[];if(v){for(t=0;t=0&&(e=!0)}),!e)}),r.regions},b.selected=function(t){var e=this.internal,i=e.d3;return i.merge(e.main.selectAll("."+o.shapes+e.getTargetSelectorSuffix(t)).selectAll("."+o.shape).filter(function(){return i.select(this).classed(o.SELECTED)}).map(function(t){return t.map(function(t){var e=t.__data__;return e.data?e.data:e})}))},b.select=function(t,e,i){var n=this.internal,a=n.d3,r=n.config;r.data_selection_enabled&&n.main.selectAll("."+o.shapes).selectAll("."+o.shape).each(function(s,c){var d=a.select(this),l=s.data?s.data.id:s.id,u=n.getToggle(this,s).bind(n),h=r.data_selection_grouped||!t||t.indexOf(l)>=0,g=!e||e.indexOf(c)>=0,p=d.classed(o.SELECTED);d.classed(o.line)||d.classed(o.area)||(h&&g?r.data_selection_isselectable(s)&&!p&&u(!0,d.classed(o.SELECTED,!0),s,c):void 0!==i&&i&&p&&u(!1,d.classed(o.SELECTED,!1),s,c))})},b.unselect=function(t,e){var i=this.internal,n=i.d3,a=i.config;a.data_selection_enabled&&i.main.selectAll("."+o.shapes).selectAll("."+o.shape).each(function(r,s){var c=n.select(this),d=r.data?r.data.id:r.id,l=i.getToggle(this,r).bind(i),u=a.data_selection_grouped||!t||t.indexOf(d)>=0,h=!e||e.indexOf(s)>=0,g=c.classed(o.SELECTED);c.classed(o.line)||c.classed(o.area)||u&&h&&a.data_selection_isselectable(r)&&g&&l(!1,c.classed(o.SELECTED,!1),r,s)})},b.show=function(t,e){var i,n=this.internal;t=n.mapToTargetIds(t),e=e||{},n.removeHiddenTargetIds(t),(i=n.svg.selectAll(n.selectorTargets(t))).transition().style("opacity",1,"important").call(n.endall,function(){i.style("opacity",null).style("opacity",1)}),e.withLegend&&n.showLegend(t),n.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0,withLegend:!0})},b.hide=function(t,e){var i,n=this.internal;t=n.mapToTargetIds(t),e=e||{},n.addHiddenTargetIds(t),(i=n.svg.selectAll(n.selectorTargets(t))).transition().style("opacity",0,"important").call(n.endall,function(){i.style("opacity",null).style("opacity",0)}),e.withLegend&&n.hideLegend(t),n.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0,withLegend:!0})},b.toggle=function(t,e){var i=this,n=this.internal;n.mapToTargetIds(t).forEach(function(t){n.isTargetToShow(t)?i.hide(t,e):i.show(t,e)})},b.tooltip=function(){},b.tooltip.show=function(t){var e,i,n=this.internal;t.mouse&&(i=t.mouse),t.data?n.isMultipleX()?(i=[n.x(t.data.x),n.getYScale(t.data.id)(t.data.value)],e=null):e=c(t.data.index)?t.data.index:n.getIndexByX(t.data.x):void 0!==t.x?e=n.getIndexByX(t.x):void 0!==t.index&&(e=t.index),n.dispatchEvent("mouseover",e,i),n.dispatchEvent("mousemove",e,i),n.config.tooltip_onshow.call(n,t.data)},b.tooltip.hide=function(){this.internal.dispatchEvent("mouseout",0),this.internal.config.tooltip_onhide.call(this)},b.transform=function(t,e){var i=this.internal,n=["pie","donut"].indexOf(t)>=0?{withTransform:!0}:null;i.transformTo(e,t,n)},A.transformTo=function(t,e,i){var 
n=this,a=!n.hasArcType(),r=i||{withTransitionForAxis:a};r.withTransitionForTransform=!1,n.transiting=!1,n.setTargetType(t,e),n.updateTargets(n.data.targets),n.updateAndRedraw(r)},b.x=function(t){var e=this.internal;return arguments.length&&(e.updateTargetX(e.data.targets,t),e.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0})),e.data.xs},b.xs=function(t){var e=this.internal;return arguments.length&&(e.updateTargetXs(e.data.targets,t),e.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0})),e.data.xs},b.zoom=function(t){var e=this.internal;return t&&(e.isTimeSeries()&&(t=t.map(function(t){return e.parseDate(t)})),e.brush.extent(t),e.redraw({withUpdateXDomain:!0,withY:e.config.zoom_rescale}),e.config.zoom_onzoom.call(this,e.x.orgDomain())),e.brush.extent()},b.zoom.enable=function(t){var e=this.internal;e.config.zoom_enabled=t,e.updateAndRedraw()},b.unzoom=function(){var t=this.internal;t.brush.clear().update(),t.redraw({withUpdateXDomain:!0})},b.zoom.max=function(t){var e=this.internal,i=e.config,n=e.d3;if(0!==t&&!t)return i.zoom_x_max;i.zoom_x_max=n.max([e.orgXDomain[1],t])},b.zoom.min=function(t){var e=this.internal,i=e.config,n=e.d3;if(0!==t&&!t)return i.zoom_x_min;i.zoom_x_min=n.min([e.orgXDomain[0],t])},b.zoom.range=function(t){if(!arguments.length)return{max:this.domain.max(),min:this.domain.min()};void 0!==t.max&&this.domain.max(t.max),void 0!==t.min&&this.domain.min(t.min)},A.initPie=function(){var t=this,e=t.d3,i=t.config;t.pie=e.layout.pie().value(function(t){return t.values.reduce(function(t,e){return t+e.value},0)}),i.data_order||t.pie.sort(null)},A.updateRadius=function(){var t=this,e=t.config,i=e.gauge_width||e.donut_width;t.radiusExpanded=Math.min(t.arcWidth,t.arcHeight)/2,t.radius=.95*t.radiusExpanded,t.innerRadiusRatio=i?(t.radius-i)/t.radius:.6,t.innerRadius=t.hasType("donut")||t.hasType("gauge")?t.radius*t.innerRadiusRatio:0},A.updateArc=function(){var t=this;t.svgArc=t.getSvgArc(),t.svgArcExpanded=t.getSvgArcExpanded(),t.svgArcExpandedSub=t.getSvgArcExpanded(.98)},A.updateAngle=function(t){var e,i,n,a,r=this,o=r.config,s=!1,c=0;return o?(r.pie(r.filterTargetsToShow(r.data.targets)).forEach(function(e){s||e.data.id!==t.data.id||(s=!0,(t=e).index=c),c++}),isNaN(t.startAngle)&&(t.startAngle=0),isNaN(t.endAngle)&&(t.endAngle=t.startAngle),r.isGaugeType(t.data)&&(e=o.gauge_min,i=o.gauge_max,n=Math.PI*(o.gauge_fullCircle?2:1)/(i-e),a=t.value.375?1.175-36/o.radius:.8)*o.radius/a:0)+","+n*r+")"),l},A.getArcRatio=function(t){var e=this,i=e.config,n=Math.PI*(e.hasType("gauge")&&!i.gauge_fullCircle?1:2);return t?(t.endAngle-t.startAngle)/n:null},A.convertToArcData=function(t){return this.addName({id:t.data.id,value:t.value,ratio:this.getArcRatio(t),index:t.index})},A.textForArcLabel=function(t){var e,i,n,a,r,o=this;return o.shouldShowArcLabel()?(e=o.updateAngle(t),i=e?e.value:null,n=o.getArcRatio(e),a=t.data.id,o.hasType("gauge")||o.meetsArcLabelThreshold(n)?(r=o.getArcLabelFormat(),r?r(i,n,a):o.defaultArcValueFormat(i,n)):""):""},A.textForGaugeMinMax=function(t,e){var i=this.getGaugeLabelExtents();return i?i(t,e):t},A.expandArc=function(t){var 
e,i=this;i.transiting?e=window.setInterval(function(){i.transiting||(window.clearInterval(e),i.legend.selectAll(".c3-legend-item-focused").size()>0&&i.expandArc(t))},10):(t=i.mapToTargetIds(t),i.svg.selectAll(i.selectorTargets(t,"."+o.chartArc)).each(function(t){i.shouldExpand(t.data.id)&&i.d3.select(this).selectAll("path").transition().duration(i.expandDuration(t.data.id)).attr("d",i.svgArcExpanded).transition().duration(2*i.expandDuration(t.data.id)).attr("d",i.svgArcExpandedSub).each(function(t){i.isDonutType(t.data)})}))},A.unexpandArc=function(t){var e=this;e.transiting||(t=e.mapToTargetIds(t),e.svg.selectAll(e.selectorTargets(t,"."+o.chartArc)).selectAll("path").transition().duration(function(t){return e.expandDuration(t.data.id)}).attr("d",e.svgArc),e.svg.selectAll("."+o.arc))},A.expandDuration=function(t){var e=this,i=e.config;return e.isDonutType(t)?i.donut_expand_duration:e.isGaugeType(t)?i.gauge_expand_duration:e.isPieType(t)?i.pie_expand_duration:50},A.shouldExpand=function(t){var e=this,i=e.config;return e.isDonutType(t)&&i.donut_expand||e.isGaugeType(t)&&i.gauge_expand||e.isPieType(t)&&i.pie_expand},A.shouldShowArcLabel=function(){var t=this,e=t.config,i=!0;return t.hasType("donut")?i=e.donut_label_show:t.hasType("pie")&&(i=e.pie_label_show),i},A.meetsArcLabelThreshold=function(t){var e=this,i=e.config;return t>=(e.hasType("donut")?i.donut_label_threshold:i.pie_label_threshold)},A.getArcLabelFormat=function(){var t=this,e=t.config,i=e.pie_label_format;return t.hasType("gauge")?i=e.gauge_label_format:t.hasType("donut")&&(i=e.donut_label_format),i},A.getGaugeLabelExtents=function(){return this.config.gauge_label_extents},A.getArcTitle=function(){var t=this;return t.hasType("donut")?t.config.donut_title:""},A.updateTargetsForArc=function(t){var e,i=this,n=i.main,a=i.classChartArc.bind(i),r=i.classArcs.bind(i),s=i.classFocus.bind(i);(e=n.select("."+o.chartArcs).selectAll("."+o.chartArc).data(i.pie(t)).attr("class",function(t){return a(t)+s(t.data)}).enter().append("g").attr("class",a)).append("g").attr("class",r),e.append("text").attr("dy",i.hasType("gauge")?"-.1em":".35em").style("opacity",0).style("text-anchor","middle").style("pointer-events","none")},A.initArc=function(){var t=this;t.arcs=t.main.select("."+o.chart).append("g").attr("class",o.chartArcs).attr("transform",t.getTranslate("arc")),t.arcs.append("text").attr("class",o.chartArcsTitle).style("text-anchor","middle").text(t.getArcTitle())},A.redrawArc=function(t,e,i){var n,a=this,r=a.d3,s=a.config,c=a.main;(n=c.selectAll("."+o.arcs).selectAll("."+o.arc).data(a.arcData.bind(a))).enter().append("path").attr("class",a.classArc.bind(a)).style("fill",function(t){return a.color(t.data)}).style("cursor",function(t){return s.interaction_enabled&&s.data_selection_isselectable(t)?"pointer":null}).each(function(t){a.isGaugeType(t.data)&&(t.startAngle=t.endAngle=s.gauge_startingAngle),this._current=t}),n.attr("transform",function(t){return!a.isGaugeType(t.data)&&i?"scale(0)":""}).on("mouseover",s.interaction_enabled?function(t){var e,i;a.transiting||(e=a.updateAngle(t))&&(i=a.convertToArcData(e),a.expandArc(e.data.id),a.api.focus(e.data.id),a.toggleFocusLegend(e.data.id,!0),a.config.data_onmouseover(i,this))}:null).on("mousemove",s.interaction_enabled?function(t){var e,i=a.updateAngle(t);i&&(e=[a.convertToArcData(i)],a.showTooltip(e,this))}:null).on("mouseout",s.interaction_enabled?function(t){var 
e,i;a.transiting||(e=a.updateAngle(t))&&(i=a.convertToArcData(e),a.unexpandArc(e.data.id),a.api.revert(),a.revertLegend(),a.hideTooltip(),a.config.data_onmouseout(i,this))}:null).on("click",s.interaction_enabled?function(t,e){var i,n=a.updateAngle(t);n&&(i=a.convertToArcData(n),a.toggleShape&&a.toggleShape(this,i,e),a.config.data_onclick.call(a.api,i,this))}:null).each(function(){a.transiting=!0}).transition().duration(t).attrTween("d",function(t){var e,i=a.updateAngle(t);return i?(isNaN(this._current.startAngle)&&(this._current.startAngle=0),isNaN(this._current.endAngle)&&(this._current.endAngle=this._current.startAngle),e=r.interpolate(this._current,i),this._current=e(0),function(i){var n=e(i);return n.data=t.data,a.getArc(n,!0)}):function(){return"M 0 0"}}).attr("transform",i?"scale(1)":"").style("fill",function(t){return a.levelColor?a.levelColor(t.data.values[0].value):a.color(t.data.id)}).call(a.endall,function(){a.transiting=!1}),n.exit().transition().duration(e).style("opacity",0).remove(),c.selectAll("."+o.chartArc).select("text").style("opacity",0).attr("class",function(t){return a.isGaugeType(t.data)?o.gaugeValue:""}).text(a.textForArcLabel.bind(a)).attr("transform",a.transformForArcLabel.bind(a)).style("font-size",function(t){return a.isGaugeType(t.data)?Math.round(a.radius/5)+"px":""}).transition().duration(t).style("opacity",function(t){return a.isTargetToShow(t.data.id)&&a.isArcType(t.data)?1:0}),c.select("."+o.chartArcsTitle).style("opacity",a.hasType("donut")||a.hasType("gauge")?1:0),a.hasType("gauge")&&(a.arcs.select("."+o.chartArcsBackground).attr("d",function(){var t={data:[{value:s.gauge_max}],startAngle:s.gauge_startingAngle,endAngle:-1*s.gauge_startingAngle};return a.getArc(t,!0,!0)}),a.arcs.select("."+o.chartArcsGaugeUnit).attr("dy",".75em").text(s.gauge_label_show?s.gauge_units:""),a.arcs.select("."+o.chartArcsGaugeMin).attr("dx",-1*(a.innerRadius+(a.radius-a.innerRadius)/(s.gauge_fullCircle?1:2))+"px").attr("dy","1.2em").text(s.gauge_label_show?a.textForGaugeMinMax(s.gauge_min,!1):""),a.arcs.select("."+o.chartArcsGaugeMax).attr("dx",a.innerRadius+(a.radius-a.innerRadius)/(s.gauge_fullCircle?1:2)+"px").attr("dy","1.2em").text(s.gauge_label_show?a.textForGaugeMinMax(s.gauge_max,!0):""))},A.initGauge=function(){var t=this.arcs;this.hasType("gauge")&&(t.append("path").attr("class",o.chartArcsBackground),t.append("text").attr("class",o.chartArcsGaugeUnit).style("text-anchor","middle").style("pointer-events","none"),t.append("text").attr("class",o.chartArcsGaugeMin).style("text-anchor","middle").style("pointer-events","none"),t.append("text").attr("class",o.chartArcsGaugeMax).style("text-anchor","middle").style("pointer-events","none"))},A.getGaugeLabelHeight=function(){return this.config.gauge_label_show?20:0},A.hasCaches=function(t){for(var e=0;e=0?o.focused:"")},A.classDefocused=function(t){return" "+(this.defocusedTargetIds.indexOf(t.id)>=0?o.defocused:"")},A.classChartText=function(t){return o.chartText+this.classTarget(t.id)},A.classChartLine=function(t){return o.chartLine+this.classTarget(t.id)},A.classChartBar=function(t){return o.chartBar+this.classTarget(t.id)},A.classChartArc=function(t){return o.chartArc+this.classTarget(t.data.id)},A.getTargetSelectorSuffix=function(t){return t||0===t?("-"+t).replace(/[\s?!@#$%^&*()_=+,.<>'":;\[\]\/|~`{}\\]/g,"-"):""},A.selectorTarget=function(t,e){return(e||"")+"."+o.target+this.getTargetSelectorSuffix(t)},A.selectorTargets=function(t,e){var i=this;return t=t||[],t.length?t.map(function(t){return 
i.selectorTarget(t,e)}):null},A.selectorLegend=function(t){return"."+o.legendItem+this.getTargetSelectorSuffix(t)},A.selectorLegends=function(t){var e=this;return t&&t.length?t.map(function(t){return e.selectorLegend(t)}):null},A.getClipPath=function(t){return"url("+(window.navigator.appVersion.toLowerCase().indexOf("msie 9.")>=0?"":document.URL.split("#")[0])+"#"+t+")"},A.appendClip=function(t,e){return t.append("clipPath").attr("id",e).append("rect")},A.getAxisClipX=function(t){var e=Math.max(30,this.margin.left);return t?-(1+e):-(e-1)},A.getAxisClipY=function(t){return t?-20:-this.margin.top},A.getXAxisClipX=function(){var t=this;return t.getAxisClipX(!t.config.axis_rotated)},A.getXAxisClipY=function(){var t=this;return t.getAxisClipY(!t.config.axis_rotated)},A.getYAxisClipX=function(){var t=this;return t.config.axis_y_inner?-1:t.getAxisClipX(t.config.axis_rotated)},A.getYAxisClipY=function(){var t=this;return t.getAxisClipY(t.config.axis_rotated)},A.getAxisClipWidth=function(t){var e=this,i=Math.max(30,e.margin.left),n=Math.max(30,e.margin.right);return t?e.width+2+i+n:e.margin.left+20},A.getAxisClipHeight=function(t){return(t?this.margin.bottom:this.margin.top+this.height)+20},A.getXAxisClipWidth=function(){var t=this;return t.getAxisClipWidth(!t.config.axis_rotated)},A.getXAxisClipHeight=function(){var t=this;return t.getAxisClipHeight(!t.config.axis_rotated)},A.getYAxisClipWidth=function(){var t=this;return t.getAxisClipWidth(t.config.axis_rotated)+(t.config.axis_y_inner?20:0)},A.getYAxisClipHeight=function(){var t=this;return t.getAxisClipHeight(t.config.axis_rotated)},A.generateColor=function(){var t=this,e=t.config,i=t.d3,n=e.data_colors,a=y(e.color_pattern)?e.color_pattern:i.scale.category10().range(),r=e.data_color,o=[];return function(t){var e,i=t.id||t.data&&t.data.id||t;return n[i]instanceof Function?e=n[i](t):n[i]?e=n[i]:(o.indexOf(i)<0&&o.push(i),e=a[o.indexOf(i)%a.length],n[i]=e),r instanceof Function?r(e,t):e}},A.generateLevelColor=function(){var t=this.config,e=t.color_pattern,i=t.color_threshold,n="value"===i.unit,a=i.values&&i.values.length?i.values:[],r=i.max||100;return y(t.color_threshold)?function(t){var i,o=e[e.length-1];for(i=0;i=0?n.data.xs[i]=(e&&n.data.xs[i]?n.data.xs[i]:[]).concat(t.map(function(t){return t[r]}).filter(c).map(function(t,e){return n.generateTargetX(t,i,e)})):a.data_x?n.data.xs[i]=n.getOtherTargetXs():y(a.data_xs)&&(n.data.xs[i]=n.getXValuesOfXKey(r,n.data.targets)):n.data.xs[i]=t.map(function(t,e){return e})}),r.forEach(function(t){if(!n.data.xs[t])throw new Error('x is not defined for id = "'+t+'".')}),(i=r.map(function(e,i){var r=a.data_idConverter(e);return{id:r,id_org:e,values:t.map(function(t,o){var s,c=t[n.getXKey(e)],d=null===t[e]||isNaN(t[e])?null:+t[e];return n.isCustomX()&&n.isCategorized()&&void 0!==c?(0===i&&0===o&&(a.axis_x_categories=[]),-1===(s=a.axis_x_categories.indexOf(c))&&(s=a.axis_x_categories.length,a.axis_x_categories.push(c))):s=n.generateTargetX(c,e,o),(void 0===t[e]||n.data.xs[e].length<=o)&&(s=void 0),{x:s,value:d,id:r}}).filter(function(t){return g(t.x)})}})).forEach(function(t){var e;a.data_xSort&&(t.values=t.values.sort(function(t,e){return(t.x||0===t.x?t.x:1/0)-(e.x||0===e.x?e.x:1/0)})),e=0,t.values.forEach(function(t){t.index=e++}),n.data.xs[t.id].sort(function(t,e){return t-e})}),n.hasNegativeValue=n.hasNegativeValueInTargets(i),n.hasPositiveValue=n.hasPositiveValueInTargets(i),a.data_type&&n.setTargetType(n.mapToIds(i).filter(function(t){return!(t in 
[c3.min.js hunk omitted: this patch deletes and re-adds the single-line minified build of the c3
chart library (the UMD bundle exporting `t.c3`, with d3 as its runtime dependency) that is vendored
with the project's UI; the old copy ended without a trailing newline ("\ No newline at end of
file"). The blob is machine-generated minifier output, further garbled in text extraction, and is
not meaningfully reviewable as text.]
i=this.internal,n=i.config;return arguments.length>1&&(n.axis_x_categories[t]=e,i.redraw()),n.axis_x_categories[t]},b.categories=function(t){var e=this.internal,i=e.config;return arguments.length?(i.axis_x_categories=t,e.redraw(),i.axis_x_categories):i.axis_x_categories},b.resize=function(t){var e=this.internal.config;e.size_width=t?t.width:null,e.size_height=t?t.height:null,this.flush()},b.flush=function(){this.internal.updateAndRedraw({withLegend:!0,withTransition:!1,withTransitionForTransform:!1})},b.destroy=function(){var t=this.internal;if(window.clearInterval(t.intervalForObserveInserted),void 0!==t.resizeTimeout&&window.clearTimeout(t.resizeTimeout),window.detachEvent)window.detachEvent("onresize",t.resizeFunction);else if(window.removeEventListener)window.removeEventListener("resize",t.resizeFunction);else{var e=window.onresize;e&&e.add&&e.remove&&e.remove(t.resizeFunction)}return t.selectChart.classed("c3",!1).html(""),Object.keys(t).forEach(function(e){t[e]=null}),null},b.color=function(t){return this.internal.color(t)},b.data=function(t){var e=this.internal.data.targets;return void 0===t?e:e.filter(function(e){return[].concat(t).indexOf(e.id)>=0})},b.data.shown=function(t){return this.internal.filterTargetsToShow(this.data(t))},b.data.values=function(t){var e,i=null;return t&&(i=(e=this.data(t))[0]?e[0].values.map(function(t){return t.value}):null),i},b.data.names=function(t){return this.internal.clearLegendItemTextBoxCache(),this.internal.updateDataAttributes("names",t)},b.data.colors=function(t){return this.internal.updateDataAttributes("colors",t)},b.data.axes=function(t){return this.internal.updateDataAttributes("axes",t)},b.flow=function(t){var e,i,n,a,r,o,s,d=this.internal,l=[],u=d.getMaxDataCount(),h=0,g=0;if(t.json)i=d.convertJsonToData(t.json,t.keys);else if(t.rows)i=d.convertRowsToData(t.rows);else{if(!t.columns)return;i=d.convertColumnsToData(t.columns)}e=d.convertDataToTargets(i,!0),d.data.targets.forEach(function(t){var i,n,a=!1;for(i=0;i1?a.values[a.values.length-1].x-r.x:r.x-d.getXDomain(d.data.targets)[0]:1,n=[r.x-o,r.x],d.updateXDomain(null,!0,!0,!1,n)),d.updateTargets(d.data.targets),d.redraw({flow:{index:r.index,length:h,duration:c(t.duration)?t.duration:d.config.transition_duration,done:t.done,orgDataCount:u},withLegend:!0,withTransition:u>1,withTrimXDomain:!1,withUpdateXAxis:!0})},A.generateFlow=function(t){var e=this,i=e.config,n=e.d3;return function(){var a,r,s,c=t.targets,d=t.flow,l=t.drawBar,u=t.drawLine,h=t.drawArea,g=t.cx,p=t.cy,f=t.xv,x=t.xForText,y=t.yForText,m=t.duration,S=1,w=d.index,v=d.length,b=e.getValueOnIndex(e.data.targets[0].values,w),A=e.getValueOnIndex(e.data.targets[0].values,w+v),T=e.x.domain(),P=d.duration||m,L=d.done||function(){},C=e.generateWait(),V=e.xgrid||n.selectAll([]),G=e.xgridLines||n.selectAll([]),E=e.mainRegion||n.selectAll([]),I=e.mainText||n.selectAll([]),O=e.mainBar||n.selectAll([]),R=e.mainLine||n.selectAll([]),D=e.mainArea||n.selectAll([]),F=e.mainCircle||n.selectAll([]);e.flowing=!0,e.data.targets.forEach(function(t){t.values.splice(0,v)}),s=e.updateXDomain(c,!0,!0),e.updateXGrid&&e.updateXGrid(!0),d.orgDataCount?a=1===d.orgDataCount||(b&&b.x)===(A&&A.x)?e.x(T[0])-e.x(s[0]):e.isTimeSeries()?e.x(T[0])-e.x(s[0]):e.x(b.x)-e.x(A.x):1!==e.data.targets[0].values.length?a=e.x(T[0])-e.x(s[0]):e.isTimeSeries()?(b=e.getValueOnIndex(e.data.targets[0].values,0),A=e.getValueOnIndex(e.data.targets[0].values,e.data.targets[0].values.length-1),a=e.x(b.x)-e.x(A.x)):a=_(s)/2,S=_(T)/_(s),r="translate("+a+",0) 
scale("+S+",1)",e.hideXGridFocus(),n.transition().ease("linear").duration(P).each(function(){C.add(e.axes.x.transition().call(e.xAxis)),C.add(O.transition().attr("transform",r)),C.add(R.transition().attr("transform",r)),C.add(D.transition().attr("transform",r)),C.add(F.transition().attr("transform",r)),C.add(I.transition().attr("transform",r)),C.add(E.filter(e.isRegionOnX).transition().attr("transform",r)),C.add(V.transition().attr("transform",r)),C.add(G.transition().attr("transform",r))}).call(C,function(){var t,n=[],a=[],r=[];if(v){for(t=0;t=0&&(e=!0)}),!e)}),r.regions},b.selected=function(t){var e=this.internal,i=e.d3;return i.merge(e.main.selectAll("."+o.shapes+e.getTargetSelectorSuffix(t)).selectAll("."+o.shape).filter(function(){return i.select(this).classed(o.SELECTED)}).map(function(t){return t.map(function(t){var e=t.__data__;return e.data?e.data:e})}))},b.select=function(t,e,i){var n=this.internal,a=n.d3,r=n.config;r.data_selection_enabled&&n.main.selectAll("."+o.shapes).selectAll("."+o.shape).each(function(s,c){var d=a.select(this),l=s.data?s.data.id:s.id,u=n.getToggle(this,s).bind(n),h=r.data_selection_grouped||!t||t.indexOf(l)>=0,g=!e||e.indexOf(c)>=0,p=d.classed(o.SELECTED);d.classed(o.line)||d.classed(o.area)||(h&&g?r.data_selection_isselectable(s)&&!p&&u(!0,d.classed(o.SELECTED,!0),s,c):void 0!==i&&i&&p&&u(!1,d.classed(o.SELECTED,!1),s,c))})},b.unselect=function(t,e){var i=this.internal,n=i.d3,a=i.config;a.data_selection_enabled&&i.main.selectAll("."+o.shapes).selectAll("."+o.shape).each(function(r,s){var c=n.select(this),d=r.data?r.data.id:r.id,l=i.getToggle(this,r).bind(i),u=a.data_selection_grouped||!t||t.indexOf(d)>=0,h=!e||e.indexOf(s)>=0,g=c.classed(o.SELECTED);c.classed(o.line)||c.classed(o.area)||u&&h&&a.data_selection_isselectable(r)&&g&&l(!1,c.classed(o.SELECTED,!1),r,s)})},b.show=function(t,e){var i,n=this.internal;t=n.mapToTargetIds(t),e=e||{},n.removeHiddenTargetIds(t),(i=n.svg.selectAll(n.selectorTargets(t))).transition().style("opacity",1,"important").call(n.endall,function(){i.style("opacity",null).style("opacity",1)}),e.withLegend&&n.showLegend(t),n.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0,withLegend:!0})},b.hide=function(t,e){var i,n=this.internal;t=n.mapToTargetIds(t),e=e||{},n.addHiddenTargetIds(t),(i=n.svg.selectAll(n.selectorTargets(t))).transition().style("opacity",0,"important").call(n.endall,function(){i.style("opacity",null).style("opacity",0)}),e.withLegend&&n.hideLegend(t),n.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0,withLegend:!0})},b.toggle=function(t,e){var i=this,n=this.internal;n.mapToTargetIds(t).forEach(function(t){n.isTargetToShow(t)?i.hide(t,e):i.show(t,e)})},b.tooltip=function(){},b.tooltip.show=function(t){var e,i,n=this.internal;t.mouse&&(i=t.mouse),t.data?n.isMultipleX()?(i=[n.x(t.data.x),n.getYScale(t.data.id)(t.data.value)],e=null):e=c(t.data.index)?t.data.index:n.getIndexByX(t.data.x):void 0!==t.x?e=n.getIndexByX(t.x):void 0!==t.index&&(e=t.index),n.dispatchEvent("mouseover",e,i),n.dispatchEvent("mousemove",e,i),n.config.tooltip_onshow.call(n,t.data)},b.tooltip.hide=function(){this.internal.dispatchEvent("mouseout",0),this.internal.config.tooltip_onhide.call(this)},b.transform=function(t,e){var i=this.internal,n=["pie","donut"].indexOf(t)>=0?{withTransform:!0}:null;i.transformTo(e,t,n)},A.transformTo=function(t,e,i){var 
n=this,a=!n.hasArcType(),r=i||{withTransitionForAxis:a};r.withTransitionForTransform=!1,n.transiting=!1,n.setTargetType(t,e),n.updateTargets(n.data.targets),n.updateAndRedraw(r)},b.x=function(t){var e=this.internal;return arguments.length&&(e.updateTargetX(e.data.targets,t),e.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0})),e.data.xs},b.xs=function(t){var e=this.internal;return arguments.length&&(e.updateTargetXs(e.data.targets,t),e.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0})),e.data.xs},b.zoom=function(t){var e=this.internal;return t&&(e.isTimeSeries()&&(t=t.map(function(t){return e.parseDate(t)})),e.brush.extent(t),e.redraw({withUpdateXDomain:!0,withY:e.config.zoom_rescale}),e.config.zoom_onzoom.call(this,e.x.orgDomain())),e.brush.extent()},b.zoom.enable=function(t){var e=this.internal;e.config.zoom_enabled=t,e.updateAndRedraw()},b.unzoom=function(){var t=this.internal;t.brush.clear().update(),t.redraw({withUpdateXDomain:!0})},b.zoom.max=function(t){var e=this.internal,i=e.config,n=e.d3;if(0!==t&&!t)return i.zoom_x_max;i.zoom_x_max=n.max([e.orgXDomain[1],t])},b.zoom.min=function(t){var e=this.internal,i=e.config,n=e.d3;if(0!==t&&!t)return i.zoom_x_min;i.zoom_x_min=n.min([e.orgXDomain[0],t])},b.zoom.range=function(t){if(!arguments.length)return{max:this.domain.max(),min:this.domain.min()};void 0!==t.max&&this.domain.max(t.max),void 0!==t.min&&this.domain.min(t.min)},A.initPie=function(){var t=this,e=t.d3,i=t.config;t.pie=e.layout.pie().value(function(t){return t.values.reduce(function(t,e){return t+e.value},0)}),i.data_order||t.pie.sort(null)},A.updateRadius=function(){var t=this,e=t.config,i=e.gauge_width||e.donut_width;t.radiusExpanded=Math.min(t.arcWidth,t.arcHeight)/2,t.radius=.95*t.radiusExpanded,t.innerRadiusRatio=i?(t.radius-i)/t.radius:.6,t.innerRadius=t.hasType("donut")||t.hasType("gauge")?t.radius*t.innerRadiusRatio:0},A.updateArc=function(){var t=this;t.svgArc=t.getSvgArc(),t.svgArcExpanded=t.getSvgArcExpanded(),t.svgArcExpandedSub=t.getSvgArcExpanded(.98)},A.updateAngle=function(t){var e,i,n,a,r=this,o=r.config,s=!1,c=0;return o?(r.pie(r.filterTargetsToShow(r.data.targets)).forEach(function(e){s||e.data.id!==t.data.id||(s=!0,(t=e).index=c),c++}),isNaN(t.startAngle)&&(t.startAngle=0),isNaN(t.endAngle)&&(t.endAngle=t.startAngle),r.isGaugeType(t.data)&&(e=o.gauge_min,i=o.gauge_max,n=Math.PI*(o.gauge_fullCircle?2:1)/(i-e),a=t.value.375?1.175-36/o.radius:.8)*o.radius/a:0)+","+n*r+")"),l},A.getArcRatio=function(t){var e=this,i=e.config,n=Math.PI*(e.hasType("gauge")&&!i.gauge_fullCircle?1:2);return t?(t.endAngle-t.startAngle)/n:null},A.convertToArcData=function(t){return this.addName({id:t.data.id,value:t.value,ratio:this.getArcRatio(t),index:t.index})},A.textForArcLabel=function(t){var e,i,n,a,r,o=this;return o.shouldShowArcLabel()?(e=o.updateAngle(t),i=e?e.value:null,n=o.getArcRatio(e),a=t.data.id,o.hasType("gauge")||o.meetsArcLabelThreshold(n)?(r=o.getArcLabelFormat(),r?r(i,n,a):o.defaultArcValueFormat(i,n)):""):""},A.textForGaugeMinMax=function(t,e){var i=this.getGaugeLabelExtents();return i?i(t,e):t},A.expandArc=function(t){var 
e,i=this;i.transiting?e=window.setInterval(function(){i.transiting||(window.clearInterval(e),i.legend.selectAll(".c3-legend-item-focused").size()>0&&i.expandArc(t))},10):(t=i.mapToTargetIds(t),i.svg.selectAll(i.selectorTargets(t,"."+o.chartArc)).each(function(t){i.shouldExpand(t.data.id)&&i.d3.select(this).selectAll("path").transition().duration(i.expandDuration(t.data.id)).attr("d",i.svgArcExpanded).transition().duration(2*i.expandDuration(t.data.id)).attr("d",i.svgArcExpandedSub).each(function(t){i.isDonutType(t.data)})}))},A.unexpandArc=function(t){var e=this;e.transiting||(t=e.mapToTargetIds(t),e.svg.selectAll(e.selectorTargets(t,"."+o.chartArc)).selectAll("path").transition().duration(function(t){return e.expandDuration(t.data.id)}).attr("d",e.svgArc),e.svg.selectAll("."+o.arc))},A.expandDuration=function(t){var e=this,i=e.config;return e.isDonutType(t)?i.donut_expand_duration:e.isGaugeType(t)?i.gauge_expand_duration:e.isPieType(t)?i.pie_expand_duration:50},A.shouldExpand=function(t){var e=this,i=e.config;return e.isDonutType(t)&&i.donut_expand||e.isGaugeType(t)&&i.gauge_expand||e.isPieType(t)&&i.pie_expand},A.shouldShowArcLabel=function(){var t=this,e=t.config,i=!0;return t.hasType("donut")?i=e.donut_label_show:t.hasType("pie")&&(i=e.pie_label_show),i},A.meetsArcLabelThreshold=function(t){var e=this,i=e.config;return t>=(e.hasType("donut")?i.donut_label_threshold:i.pie_label_threshold)},A.getArcLabelFormat=function(){var t=this,e=t.config,i=e.pie_label_format;return t.hasType("gauge")?i=e.gauge_label_format:t.hasType("donut")&&(i=e.donut_label_format),i},A.getGaugeLabelExtents=function(){return this.config.gauge_label_extents},A.getArcTitle=function(){var t=this;return t.hasType("donut")?t.config.donut_title:""},A.updateTargetsForArc=function(t){var e,i=this,n=i.main,a=i.classChartArc.bind(i),r=i.classArcs.bind(i),s=i.classFocus.bind(i);(e=n.select("."+o.chartArcs).selectAll("."+o.chartArc).data(i.pie(t)).attr("class",function(t){return a(t)+s(t.data)}).enter().append("g").attr("class",a)).append("g").attr("class",r),e.append("text").attr("dy",i.hasType("gauge")?"-.1em":".35em").style("opacity",0).style("text-anchor","middle").style("pointer-events","none")},A.initArc=function(){var t=this;t.arcs=t.main.select("."+o.chart).append("g").attr("class",o.chartArcs).attr("transform",t.getTranslate("arc")),t.arcs.append("text").attr("class",o.chartArcsTitle).style("text-anchor","middle").text(t.getArcTitle())},A.redrawArc=function(t,e,i){var n,a=this,r=a.d3,s=a.config,c=a.main;(n=c.selectAll("."+o.arcs).selectAll("."+o.arc).data(a.arcData.bind(a))).enter().append("path").attr("class",a.classArc.bind(a)).style("fill",function(t){return a.color(t.data)}).style("cursor",function(t){return s.interaction_enabled&&s.data_selection_isselectable(t)?"pointer":null}).each(function(t){a.isGaugeType(t.data)&&(t.startAngle=t.endAngle=s.gauge_startingAngle),this._current=t}),n.attr("transform",function(t){return!a.isGaugeType(t.data)&&i?"scale(0)":""}).on("mouseover",s.interaction_enabled?function(t){var e,i;a.transiting||(e=a.updateAngle(t))&&(i=a.convertToArcData(e),a.expandArc(e.data.id),a.api.focus(e.data.id),a.toggleFocusLegend(e.data.id,!0),a.config.data_onmouseover(i,this))}:null).on("mousemove",s.interaction_enabled?function(t){var e,i=a.updateAngle(t);i&&(e=[a.convertToArcData(i)],a.showTooltip(e,this))}:null).on("mouseout",s.interaction_enabled?function(t){var 
e,i;a.transiting||(e=a.updateAngle(t))&&(i=a.convertToArcData(e),a.unexpandArc(e.data.id),a.api.revert(),a.revertLegend(),a.hideTooltip(),a.config.data_onmouseout(i,this))}:null).on("click",s.interaction_enabled?function(t,e){var i,n=a.updateAngle(t);n&&(i=a.convertToArcData(n),a.toggleShape&&a.toggleShape(this,i,e),a.config.data_onclick.call(a.api,i,this))}:null).each(function(){a.transiting=!0}).transition().duration(t).attrTween("d",function(t){var e,i=a.updateAngle(t);return i?(isNaN(this._current.startAngle)&&(this._current.startAngle=0),isNaN(this._current.endAngle)&&(this._current.endAngle=this._current.startAngle),e=r.interpolate(this._current,i),this._current=e(0),function(i){var n=e(i);return n.data=t.data,a.getArc(n,!0)}):function(){return"M 0 0"}}).attr("transform",i?"scale(1)":"").style("fill",function(t){return a.levelColor?a.levelColor(t.data.values[0].value):a.color(t.data.id)}).call(a.endall,function(){a.transiting=!1}),n.exit().transition().duration(e).style("opacity",0).remove(),c.selectAll("."+o.chartArc).select("text").style("opacity",0).attr("class",function(t){return a.isGaugeType(t.data)?o.gaugeValue:""}).text(a.textForArcLabel.bind(a)).attr("transform",a.transformForArcLabel.bind(a)).style("font-size",function(t){return a.isGaugeType(t.data)?Math.round(a.radius/5)+"px":""}).transition().duration(t).style("opacity",function(t){return a.isTargetToShow(t.data.id)&&a.isArcType(t.data)?1:0}),c.select("."+o.chartArcsTitle).style("opacity",a.hasType("donut")||a.hasType("gauge")?1:0),a.hasType("gauge")&&(a.arcs.select("."+o.chartArcsBackground).attr("d",function(){var t={data:[{value:s.gauge_max}],startAngle:s.gauge_startingAngle,endAngle:-1*s.gauge_startingAngle};return a.getArc(t,!0,!0)}),a.arcs.select("."+o.chartArcsGaugeUnit).attr("dy",".75em").text(s.gauge_label_show?s.gauge_units:""),a.arcs.select("."+o.chartArcsGaugeMin).attr("dx",-1*(a.innerRadius+(a.radius-a.innerRadius)/(s.gauge_fullCircle?1:2))+"px").attr("dy","1.2em").text(s.gauge_label_show?a.textForGaugeMinMax(s.gauge_min,!1):""),a.arcs.select("."+o.chartArcsGaugeMax).attr("dx",a.innerRadius+(a.radius-a.innerRadius)/(s.gauge_fullCircle?1:2)+"px").attr("dy","1.2em").text(s.gauge_label_show?a.textForGaugeMinMax(s.gauge_max,!0):""))},A.initGauge=function(){var t=this.arcs;this.hasType("gauge")&&(t.append("path").attr("class",o.chartArcsBackground),t.append("text").attr("class",o.chartArcsGaugeUnit).style("text-anchor","middle").style("pointer-events","none"),t.append("text").attr("class",o.chartArcsGaugeMin).style("text-anchor","middle").style("pointer-events","none"),t.append("text").attr("class",o.chartArcsGaugeMax).style("text-anchor","middle").style("pointer-events","none"))},A.getGaugeLabelHeight=function(){return this.config.gauge_label_show?20:0},A.hasCaches=function(t){for(var e=0;e=0?o.focused:"")},A.classDefocused=function(t){return" "+(this.defocusedTargetIds.indexOf(t.id)>=0?o.defocused:"")},A.classChartText=function(t){return o.chartText+this.classTarget(t.id)},A.classChartLine=function(t){return o.chartLine+this.classTarget(t.id)},A.classChartBar=function(t){return o.chartBar+this.classTarget(t.id)},A.classChartArc=function(t){return o.chartArc+this.classTarget(t.data.id)},A.getTargetSelectorSuffix=function(t){return t||0===t?("-"+t).replace(/[\s?!@#$%^&*()_=+,.<>'":;\[\]\/|~`{}\\]/g,"-"):""},A.selectorTarget=function(t,e){return(e||"")+"."+o.target+this.getTargetSelectorSuffix(t)},A.selectorTargets=function(t,e){var i=this;return t=t||[],t.length?t.map(function(t){return 
i.selectorTarget(t,e)}):null},A.selectorLegend=function(t){return"."+o.legendItem+this.getTargetSelectorSuffix(t)},A.selectorLegends=function(t){var e=this;return t&&t.length?t.map(function(t){return e.selectorLegend(t)}):null},A.getClipPath=function(t){return"url("+(window.navigator.appVersion.toLowerCase().indexOf("msie 9.")>=0?"":document.URL.split("#")[0])+"#"+t+")"},A.appendClip=function(t,e){return t.append("clipPath").attr("id",e).append("rect")},A.getAxisClipX=function(t){var e=Math.max(30,this.margin.left);return t?-(1+e):-(e-1)},A.getAxisClipY=function(t){return t?-20:-this.margin.top},A.getXAxisClipX=function(){var t=this;return t.getAxisClipX(!t.config.axis_rotated)},A.getXAxisClipY=function(){var t=this;return t.getAxisClipY(!t.config.axis_rotated)},A.getYAxisClipX=function(){var t=this;return t.config.axis_y_inner?-1:t.getAxisClipX(t.config.axis_rotated)},A.getYAxisClipY=function(){var t=this;return t.getAxisClipY(t.config.axis_rotated)},A.getAxisClipWidth=function(t){var e=this,i=Math.max(30,e.margin.left),n=Math.max(30,e.margin.right);return t?e.width+2+i+n:e.margin.left+20},A.getAxisClipHeight=function(t){return(t?this.margin.bottom:this.margin.top+this.height)+20},A.getXAxisClipWidth=function(){var t=this;return t.getAxisClipWidth(!t.config.axis_rotated)},A.getXAxisClipHeight=function(){var t=this;return t.getAxisClipHeight(!t.config.axis_rotated)},A.getYAxisClipWidth=function(){var t=this;return t.getAxisClipWidth(t.config.axis_rotated)+(t.config.axis_y_inner?20:0)},A.getYAxisClipHeight=function(){var t=this;return t.getAxisClipHeight(t.config.axis_rotated)},A.generateColor=function(){var t=this,e=t.config,i=t.d3,n=e.data_colors,a=y(e.color_pattern)?e.color_pattern:i.scale.category10().range(),r=e.data_color,o=[];return function(t){var e,i=t.id||t.data&&t.data.id||t;return n[i]instanceof Function?e=n[i](t):n[i]?e=n[i]:(o.indexOf(i)<0&&o.push(i),e=a[o.indexOf(i)%a.length],n[i]=e),r instanceof Function?r(e,t):e}},A.generateLevelColor=function(){var t=this.config,e=t.color_pattern,i=t.color_threshold,n="value"===i.unit,a=i.values&&i.values.length?i.values:[],r=i.max||100;return y(t.color_threshold)?function(t){var i,o=e[e.length-1];for(i=0;i=0?n.data.xs[i]=(e&&n.data.xs[i]?n.data.xs[i]:[]).concat(t.map(function(t){return t[r]}).filter(c).map(function(t,e){return n.generateTargetX(t,i,e)})):a.data_x?n.data.xs[i]=n.getOtherTargetXs():y(a.data_xs)&&(n.data.xs[i]=n.getXValuesOfXKey(r,n.data.targets)):n.data.xs[i]=t.map(function(t,e){return e})}),r.forEach(function(t){if(!n.data.xs[t])throw new Error('x is not defined for id = "'+t+'".')}),(i=r.map(function(e,i){var r=a.data_idConverter(e);return{id:r,id_org:e,values:t.map(function(t,o){var s,c=t[n.getXKey(e)],d=null===t[e]||isNaN(t[e])?null:+t[e];return n.isCustomX()&&n.isCategorized()&&void 0!==c?(0===i&&0===o&&(a.axis_x_categories=[]),-1===(s=a.axis_x_categories.indexOf(c))&&(s=a.axis_x_categories.length,a.axis_x_categories.push(c))):s=n.generateTargetX(c,e,o),(void 0===t[e]||n.data.xs[e].length<=o)&&(s=void 0),{x:s,value:d,id:r}}).filter(function(t){return g(t.x)})}})).forEach(function(t){var e;a.data_xSort&&(t.values=t.values.sort(function(t,e){return(t.x||0===t.x?t.x:1/0)-(e.x||0===e.x?e.x:1/0)})),e=0,t.values.forEach(function(t){t.index=e++}),n.data.xs[t.id].sort(function(t,e){return t-e})}),n.hasNegativeValue=n.hasNegativeValueInTargets(i),n.hasPositiveValue=n.hasPositiveValueInTargets(i),a.data_type&&n.setTargetType(n.mapToIds(i).filter(function(t){return!(t in 
a.data_types)}),a.data_type),i.forEach(function(t){n.addCache(t.id_org,t)}),i},A.isX=function(t){var e=this.config;return e.data_x&&t===e.data_x||y(e.data_xs)&&S(e.data_xs,t)},A.isNotX=function(t){return!this.isX(t)},A.getXKey=function(t){var e=this.config;return e.data_x?e.data_x:y(e.data_xs)?e.data_xs[t]:null},A.getXValuesOfXKey=function(t,e){var i,n=this;return(e&&y(e)?n.mapToIds(e):[]).forEach(function(e){n.getXKey(e)===t&&(i=n.data.xs[e])}),i},A.getIndexByX=function(t){var e=this,i=e.filterByX(e.data.targets,t);return i.length?i[0].index:null},A.getXValue=function(t,e){var i=this;return t in i.data.xs&&i.data.xs[t]&&c(i.data.xs[t][e])?i.data.xs[t][e]:e},A.getOtherTargetXs=function(){var t=this,e=Object.keys(t.data.xs);return e.length?t.data.xs[e[0]]:null},A.getOtherTargetX=function(t){var e=this.getOtherTargetXs();return e&&t1},A.isMultipleX=function(){return y(this.config.data_xs)||!this.config.data_xSort||this.hasType("scatter")},A.addName=function(t){var e,i=this;return t&&(e=i.config.data_names[t.id],t.name=void 0!==e?e:t.id),t},A.getValueOnIndex=function(t,e){var i=t.filter(function(t){return t.index===e});return i.length?i[0]:null},A.updateTargetX=function(t,e){var i=this;t.forEach(function(t){t.values.forEach(function(n,a){n.x=i.generateTargetX(e[a],t.id,a)}),i.data.xs[t.id]=e})},A.updateTargetXs=function(t,e){var i=this;t.forEach(function(t){e[t.id]&&i.updateTargetX([t],e[t.id])})},A.generateTargetX=function(t,e,i){var n=this;return n.isTimeSeries()?t?n.parseDate(t):n.parseDate(n.getXValue(e,i)):n.isCustomX()&&!n.isCategorized()?c(t)?+t:n.getXValue(e,i):i},A.cloneTarget=function(t){return{id:t.id,id_org:t.id_org,values:t.values.map(function(t){return{x:t.x,value:t.value,id:t.id}})}},A.updateXs=function(){var t=this;t.data.targets.length&&(t.xs=[],t.data.targets[0].values.forEach(function(e){t.xs[e.index]=e.x}))},A.getPrevX=function(t){var e=this.xs[t-1];return void 0!==e?e:null},A.getNextX=function(t){var e=this.xs[t+1];return void 0!==e?e:null},A.getMaxDataCount=function(){var t=this;return t.d3.max(t.data.targets,function(t){return t.values.length})},A.getMaxDataCountTarget=function(t){var e,i=t.length,n=0;return i>1?t.forEach(function(t){t.values.length>n&&(e=t,n=t.values.length)}):e=i?t[0]:null,e},A.getEdgeX=function(t){var e=this;return t.length?[e.d3.min(t,function(t){return t.values[0].x}),e.d3.max(t,function(t){return t.values[t.values.length-1].x})]:[0,0]},A.mapToIds=function(t){return t.map(function(t){return t.id})},A.mapToTargetIds=function(t){var e=this;return t?[].concat(t):e.mapToIds(e.data.targets)},A.hasTarget=function(t,e){var i,n=this.mapToIds(t);for(i=0;ie?1:t>=e?0:NaN})},A.addHiddenTargetIds=function(t){t=t instanceof Array?t:new Array(t);for(var e=0;e0})},A.isOrderDesc=function(){var t=this.config;return"string"==typeof t.data_order&&"desc"===t.data_order.toLowerCase()},A.isOrderAsc=function(){var t=this.config;return"string"==typeof t.data_order&&"asc"===t.data_order.toLowerCase()},A.orderTargets=function(t){var e=this,i=e.config,n=e.isOrderAsc(),a=e.isOrderDesc();return n||a?t.sort(function(t,e){var i=function(t,e){return t+Math.abs(e.value)},a=t.values.reduce(i,0),r=e.values.reduce(i,0);return n?r-a:a-r}):d(i.data_order)?t.sort(i.data_order):l(i.data_order)&&t.sort(function(t,e){return i.data_order.indexOf(t.id)-i.data_order.indexOf(e.id)}),t},A.filterByX=function(t,e){return this.d3.merge(t.map(function(t){return t.values})).filter(function(t){return t.x-e==0})},A.filterRemoveNull=function(t){return t.filter(function(t){return 
c(t.value)})},A.filterByXDomain=function(t,e){return t.map(function(t){return{id:t.id,id_org:t.id_org,values:t.values.filter(function(t){return e[0]<=t.x&&t.x<=e[1]})}})},A.hasDataLabel=function(){var t=this.config;return!("boolean"!=typeof t.data_labels||!t.data_labels)||!("object"!==s(t.data_labels)||!y(t.data_labels))},A.getDataLabelLength=function(t,e,i){var n=this,a=[0,0];return n.selectChart.select("svg").selectAll(".dummy").data([t,e]).enter().append("text").text(function(t){return n.dataLabelFormat(t.id)(t)}).each(function(t,e){a[e]=1.3*this.getBoundingClientRect()[i]}).remove(),a},A.isNoneArc=function(t){return this.hasTarget(this.data.targets,t.id)},A.isArc=function(t){return"data"in t&&this.hasTarget(this.data.targets,t.data.id)},A.findSameXOfValues=function(t,e){var i,n=t[e].x,a=[];for(i=e-1;i>=0&&n===t[i].x;i--)a.push(t[i]);for(i=e;i0)for(o=s.hasNegativeValueInTargets(t),e=0;e=0})).length)for(n=a[0],o&&l[n]&&l[n].forEach(function(t,e){l[n][e]=t<0?t:0}),i=1;i0||(l[n][e]+=+t)});return s.d3.min(Object.keys(l).map(function(t){return s.d3.min(l[t])}))},A.getYDomainMax=function(t){var e,i,n,a,r,o,s=this,c=s.config,d=s.mapToIds(t),l=s.getValuesAsIdKeyed(t);if(c.data_groups.length>0)for(o=s.hasPositiveValueInTargets(t),e=0;e=0})).length)for(n=a[0],o&&l[n]&&l[n].forEach(function(t,e){l[n][e]=t>0?t:0}),i=1;i=0&&b>=0,g=v<=0&&b<=0,(c(S)&&h||c(w)&&g)&&(T=!1),T&&(h&&(v=0),g&&(b=0)),a=Math.abs(b-v),r=o=.1*a,void 0!==A&&(b=A+(s=Math.max(Math.abs(v),Math.abs(b))),v=A-s),L?(d=p.getDataLabelLength(v,b,"width"),l=_(p.y.range()),r+=a*((u=[d[0]/l,d[1]/l])[1]/(1-u[0]-u[1])),o+=a*(u[0]/(1-u[0]-u[1]))):C&&(d=p.getDataLabelLength(v,b,"height"),r+=p.axis.convertPixelsToAxisPadding(d[1],a),o+=p.axis.convertPixelsToAxisPadding(d[0],a)),"y"===e&&y(f.axis_y_padding)&&(r=p.axis.getPadding(f.axis_y_padding,"top",r,a),o=p.axis.getPadding(f.axis_y_padding,"bottom",o,a)),"y2"===e&&y(f.axis_y2_padding)&&(r=p.axis.getPadding(f.axis_y2_padding,"top",r,a),o=p.axis.getPadding(f.axis_y2_padding,"bottom",o,a)),T&&(h&&(o=v),g&&(r=-b)),n=[v-o,b+r],P?n.reverse():n)},A.getXDomainMin=function(t){var e=this,i=e.config;return void 0!==i.axis_x_min?e.isTimeSeries()?this.parseDate(i.axis_x_min):i.axis_x_min:e.d3.min(t,function(t){return e.d3.min(t.values,function(t){return t.x})})},A.getXDomainMax=function(t){var e=this,i=e.config;return void 0!==i.axis_x_max?e.isTimeSeries()?this.parseDate(i.axis_x_max):i.axis_x_max:e.d3.max(t,function(t){return e.d3.max(t.values,function(t){return t.x})})},A.getXDomainPadding=function(t){var e,i,n,a,r=this,o=r.config,d=t[1]-t[0];return i=r.isCategorized()?0:r.hasType("bar")?(e=r.getMaxDataCount())>1?d/(e-1)/2:.5:.01*d,"object"===s(o.axis_x_padding)&&y(o.axis_x_padding)?(n=c(o.axis_x_padding.left)?o.axis_x_padding.left:i,a=c(o.axis_x_padding.right)?o.axis_x_padding.right:i):n=a="number"==typeof o.axis_x_padding?o.axis_x_padding:i,{left:n,right:a}},A.getXDomain=function(t){var e=this,i=[e.getXDomainMin(t),e.getXDomainMax(t)],n=i[0],a=i[1],r=e.getXDomainPadding(i),o=0,s=0;return n-a!=0||e.isCategorized()||(e.isTimeSeries()?(n=new Date(.5*n.getTime()),a=new Date(1.5*a.getTime())):(n=0===n?1:.5*n,a=0===a?-1:1.5*a)),(n||0===n)&&(o=e.isTimeSeries()?new Date(n.getTime()-r.left):n-r.left),(a||0===a)&&(s=e.isTimeSeries()?new Date(a.getTime()+r.right):a+r.right),[o,s]},A.updateXDomain=function(t,e,i,n,a){var r=this,o=r.config;return 
i&&(r.x.domain(a||r.d3.extent(r.getXDomain(t))),r.orgXDomain=r.x.domain(),o.zoom_enabled&&r.zoom.scale(r.x).updateScaleExtent(),r.subX.domain(r.x.domain()),r.brush&&r.brush.scale(r.subX)),e&&(r.x.domain(a||(!r.brush||r.brush.empty()?r.orgXDomain:r.brush.extent())),o.zoom_enabled&&r.zoom.scale(r.x).updateScaleExtent()),n&&r.x.domain(r.trimXDomain(r.x.orgDomain())),r.x.domain()},A.trimXDomain=function(t){var e=this.getZoomDomain(),i=e[0],n=e[1];return t[0]<=i&&(t[1]=+t[1]+(i-t[0]),t[0]=i),n<=t[1]&&(t[0]=+t[0]-(t[1]-n),t[1]=n),t},A.drag=function(t){var e,i,n,a,r,s,c,d,l=this,u=l.config,h=l.main,g=l.d3;l.hasArcType()||u.data_selection_enabled&&(u.zoom_enabled&&!l.zoom.altDomain||u.data_selection_multiple&&(e=l.dragStart[0],i=l.dragStart[1],n=t[0],a=t[1],r=Math.min(e,n),s=Math.max(e,n),c=u.data_selection_grouped?l.margin.top:Math.min(i,a),d=u.data_selection_grouped?l.height:Math.max(i,a),h.select("."+o.dragarea).attr("x",r).attr("y",c).attr("width",s-r).attr("height",d-c),h.selectAll("."+o.shapes).selectAll("."+o.shape).filter(function(t){return u.data_selection_isselectable(t)}).each(function(t,e){var i,n,a,u,h,p,f=g.select(this),_=f.classed(o.SELECTED),x=f.classed(o.INCLUDED),y=!1;if(f.classed(o.circle))i=1*f.attr("cx"),n=1*f.attr("cy"),h=l.togglePoint,y=rd&&(c=c.filter(function(t){return(""+t).indexOf(".")<0}));return c},A.getGridFilterToRemove=function(t){return t?function(e){var i=!1;return[].concat(t).forEach(function(t){("value"in t&&e.value===t.value||"class"in t&&e.class===t.class)&&(i=!0)}),i}:function(){return!0}},A.removeGridLines=function(t,e){var i=this,n=i.config,a=i.getGridFilterToRemove(t),r=function(t){return!a(t)},s=e?o.xgridLines:o.ygridLines,c=e?o.xgridLine:o.ygridLine;i.main.select("."+s).selectAll("."+c).filter(a).transition().duration(n.transition_duration).style("opacity",0).remove(),e?n.grid_x_lines=n.grid_x_lines.filter(r):n.grid_y_lines=n.grid_y_lines.filter(r)},A.initEventRect=function(){this.main.select("."+o.chart).append("g").attr("class",o.eventRects).style("fill-opacity",0)},A.redrawEventRect=function(){var t,e,i=this,n=i.config,a=i.isMultipleX(),r=i.main.select("."+o.eventRects).style("cursor",n.zoom_enabled?n.axis_rotated?"ns-resize":"ew-resize":null).classed(o.eventRectsMultiple,a).classed(o.eventRectsSingle,!a);r.selectAll("."+o.eventRect).remove(),i.eventRect=r.selectAll("."+o.eventRect),a?(t=i.eventRect.data([0]),i.generateEventRectsForMultipleXs(t.enter()),i.updateEventRect(t)):(e=i.getMaxDataCountTarget(i.data.targets),r.datum(e?e.values:[]),i.eventRect=r.selectAll("."+o.eventRect),t=i.eventRect.data(function(t){return t}),i.generateEventRectsForSingleX(t.enter()),i.updateEventRect(t),t.exit().remove())},A.updateEventRect=function(t){var e,i,n,a,r,o,s=this,c=s.config;t=t||s.eventRect.data(function(t){return t}),s.isMultipleX()?(e=0,i=0,n=s.width,a=s.height):(!s.isCustomX()&&!s.isTimeSeries()||s.isCategorized()?(r=s.getEventRectWidth(),o=function(t){return s.x(t.x)-r/2}):(s.updateXs(),r=function(t){var e=s.getPrevX(t.index),i=s.getNextX(t.index);return null===e&&null===i?c.axis_rotated?s.height:s.width:(null===e&&(e=s.x.domain()[0]),null===i&&(i=s.x.domain()[1]),Math.max(0,(s.x(i)-s.x(e))/2))},o=function(t){var e=s.getPrevX(t.index),i=s.getNextX(t.index),n=s.data.xs[t.id][t.index];return 
null===e&&null===i?0:(null===e&&(e=s.x.domain()[0]),(s.x(n)+s.x(e))/2)}),e=c.axis_rotated?0:o,i=c.axis_rotated?o:0,n=c.axis_rotated?s.width:r,a=c.axis_rotated?r:s.height),t.attr("class",s.classEvent.bind(s)).attr("x",e).attr("y",i).attr("width",n).attr("height",a)},A.generateEventRectsForSingleX=function(t){var e=this,i=e.d3,n=e.config;t.append("rect").attr("class",e.classEvent.bind(e)).style("cursor",n.data_selection_enabled&&n.data_selection_grouped?"pointer":null).on("mouseover",function(t){var i=t.index;e.dragging||e.flowing||e.hasArcType()||(n.point_focus_expand_enabled&&e.expandCircles(i,null,!0),e.expandBars(i,null,!0),e.main.selectAll("."+o.shape+"-"+i).each(function(t){n.data_onmouseover.call(e.api,t)}))}).on("mouseout",function(t){var i=t.index;e.config&&(e.hasArcType()||(e.hideXGridFocus(),e.hideTooltip(),e.unexpandCircles(),e.unexpandBars(),e.main.selectAll("."+o.shape+"-"+i).each(function(t){n.data_onmouseout.call(e.api,t)})))}).on("mousemove",function(t){var a,r=t.index,s=e.svg.select("."+o.eventRect+"-"+r);e.dragging||e.flowing||e.hasArcType()||(e.isStepType(t)&&"step-after"===e.config.line_step_type&&i.mouse(this)[0]=0}).classed(o.legendItemFocused,e).transition().duration(100).style("opacity",function(){return(e?i.opacityForLegend:i.opacityForUnfocusedLegend).call(i,i.d3.select(this))})},A.revertLegend=function(){var t=this,e=t.d3;t.legend.selectAll("."+o.legendItem).classed(o.legendItemFocused,!1).transition().duration(100).style("opacity",function(){return t.opacityForLegend(e.select(this))})},A.showLegend=function(t){var e=this,i=e.config;i.legend_show||(i.legend_show=!0,e.legend.style("visibility","visible"),e.legendHasRendered||e.updateLegendWithDefaults()),e.removeHiddenLegendIds(t),e.legend.selectAll(e.selectorLegends(t)).style("visibility","visible").transition().style("opacity",function(){return e.opacityForLegend(e.d3.select(this))})},A.hideLegend=function(t){var e=this,i=e.config;i.legend_show&&x(t)&&(i.legend_show=!1,e.legend.style("visibility","hidden")),e.addHiddenLegendIds(t),e.legend.selectAll(e.selectorLegends(t)).style("opacity",0).style("visibility","hidden")},A.clearLegendItemTextBoxCache=function(){this.legendItemTextBox={}},A.updateLegend=function(t,e,i){function n(t,e){return b.legendItemTextBox[e]||(b.legendItemTextBox[e]=b.getTextRect(t.textContent,o.legendItem,t)),b.legendItemTextBox[e]}function a(e,i,a){function r(t,e){e||(o=(p-E-g)/2)=L)&&(L=u),(!C||h>=C)&&(C=h),s=b.isLegendRight||b.isLegendInset?C:L,A.legend_equally?(Object.keys(O).forEach(function(t){O[t]=L}),Object.keys(R).forEach(function(t){R[t]=C}),(o=(p-s*t.length)/2)0&&0===v.size()&&(v=b.legend.insert("g","."+o.legendItem).attr("class",o.legendBackground).append("rect")),y=b.legend.selectAll("text").data(t).text(function(t){return void 0!==A.data_names[t]?A.data_names[t]:t}).each(function(t,e){a(this,t,e)}),(_?y.transition():y).attr("x",s).attr("y",l),S=b.legend.selectAll("rect."+o.legendItemEvent).data(t),(_?S.transition():S).attr("width",function(t){return O[t]}).attr("height",function(t){return 
R[t]}).attr("x",c).attr("y",u),w=b.legend.selectAll("line."+o.legendItemTile).data(t),(_?w.transition():w).style("stroke",b.color).attr("x1",h).attr("y1",p).attr("x2",g).attr("y2",p),v&&(_?v.transition():v).attr("height",b.getLegendHeight()-12).attr("width",L*(X+1)+10),b.legend.selectAll("."+o.legendItem).classed(o.legendItemHidden,function(t){return!b.isTargetToShow(t)}),b.updateLegendItemWidth(L),b.updateLegendItemHeight(C),b.updateLegendStep(X),b.updateSizes(),b.updateScales(),b.updateSvgSize(),b.transformAll(x,i),b.legendHasRendered=!0},A.initRegion=function(){var t=this;t.region=t.main.append("g").attr("clip-path",t.clipPath).attr("class",o.regions)},A.updateRegion=function(t){var e=this,i=e.config;e.region.style("visibility",e.hasArcType()?"hidden":"visible"),e.mainRegion=e.main.select("."+o.regions).selectAll("."+o.region).data(i.regions),e.mainRegion.enter().append("g").append("rect").style("fill-opacity",0),e.mainRegion.attr("class",e.classRegion.bind(e)),e.mainRegion.exit().transition().duration(t).style("opacity",0).remove()},A.redrawRegion=function(t){var e=this,i=e.mainRegion.selectAll("rect").each(function(){var t=e.d3.select(this.parentNode).datum();e.d3.select(this).datum(t)}),n=e.regionX.bind(e),a=e.regionY.bind(e),r=e.regionWidth.bind(e),o=e.regionHeight.bind(e);return[(t?i.transition():i).attr("x",n).attr("y",a).attr("width",r).attr("height",o).style("fill-opacity",function(t){return c(t.opacity)?t.opacity:.1})]},A.regionX=function(t){var e=this,i=e.config,n="y"===t.axis?e.y:e.y2;return"y"===t.axis||"y2"===t.axis?i.axis_rotated&&"start"in t?n(t.start):0:i.axis_rotated?0:"start"in t?e.x(e.isTimeSeries()?e.parseDate(t.start):t.start):0},A.regionY=function(t){var e=this,i=e.config,n="y"===t.axis?e.y:e.y2;return"y"===t.axis||"y2"===t.axis?i.axis_rotated?0:"end"in t?n(t.end):0:i.axis_rotated&&"start"in t?e.x(e.isTimeSeries()?e.parseDate(t.start):t.start):0},A.regionWidth=function(t){var e,i=this,n=i.config,a=i.regionX(t),r="y"===t.axis?i.y:i.y2;return e="y"===t.axis||"y2"===t.axis?n.axis_rotated&&"end"in t?r(t.end):i.width:n.axis_rotated?i.width:"end"in t?i.x(i.isTimeSeries()?i.parseDate(t.end):t.end):i.width,ei.bar_width_max?i.bar_width_max:n},A.getBars=function(t,e){var i=this;return(e?i.main.selectAll("."+o.bars+i.getTargetSelectorSuffix(e)):i.main).selectAll("."+o.bar+(c(t)?"-"+t:""))},A.expandBars=function(t,e,i){var n=this;i&&n.unexpandBars(),n.getBars(t,e).classed(o.EXPANDED,!0)},A.unexpandBars=function(t){this.getBars(t).classed(o.EXPANDED,!1)},A.generateDrawBar=function(t,e){var i=this,n=i.config,a=i.generateGetBarPoints(t,e);return function(t,e){var i=a(t,e),r=n.axis_rotated?1:0,o=n.axis_rotated?0:1;return"M "+i[0][r]+","+i[0][o]+" L"+i[1][r]+","+i[1][o]+" L"+i[2][r]+","+i[2][o]+" L"+i[3][r]+","+i[3][o]+" z"}},A.generateGetBarPoints=function(t,e){var i=this,n=e?i.subXAxis:i.xAxis,a=t.__max__+1,r=i.getBarW(n,a),o=i.getShapeX(r,a,t,!!e),s=i.getShapeY(!!e),c=i.getShapeOffset(i.isBarType,t,!!e),d=r*(i.config.bar_space/2),l=e?i.getSubYScale:i.getYScale;return function(t,e){var n=l.call(i,t.id)(0),a=c(t,e)||n,u=o(t),h=s(t);return i.config.axis_rotated&&(0=0&&(d+=s(a[o].value)-c))}),d}},A.isWithinShape=function(t,e){var i,n=this,a=n.d3.select(t);return n.isTargetToShow(e.id)?"circle"===t.nodeName?i=n.isStepType(e)?n.isWithinStep(t,n.getYScale(e.id)(e.value)):n.isWithinCircle(t,1.5*n.pointSelectR(e)):"path"===t.nodeName&&(i=!a.classed(o.bar)||n.isWithinBar(t)):i=!1,i},A.getInterpolate=function(t){var 
e=this,i=e.isInterpolationType(e.config.spline_interpolation_type)?e.config.spline_interpolation_type:"cardinal";return e.isSplineType(t)?i:e.isStepType(t)?e.config.line_step_type:"linear"},A.initLine=function(){this.main.select("."+o.chart).append("g").attr("class",o.chartLines)},A.updateTargetsForLine=function(t){var e,i=this,n=i.config,a=i.classChartLine.bind(i),r=i.classLines.bind(i),s=i.classAreas.bind(i),c=i.classCircles.bind(i),d=i.classFocus.bind(i);(e=i.main.select("."+o.chartLines).selectAll("."+o.chartLine).data(t).attr("class",function(t){return a(t)+d(t)}).enter().append("g").attr("class",a).style("opacity",0).style("pointer-events","none")).append("g").attr("class",r),e.append("g").attr("class",s),e.append("g").attr("class",function(t){return i.generateClass(o.selectedCircles,t.id)}),e.append("g").attr("class",c).style("cursor",function(t){return n.data_selection_isselectable(t)?"pointer":null}),t.forEach(function(t){i.main.selectAll("."+o.selectedCircles+i.getTargetSelectorSuffix(t.id)).selectAll("."+o.selectedCircle).each(function(e){e.value=t.values[e.index].value})})},A.updateLine=function(t){var e=this;e.mainLine=e.main.selectAll("."+o.lines).selectAll("."+o.line).data(e.lineData.bind(e)),e.mainLine.enter().append("path").attr("class",e.classLine.bind(e)).style("stroke",e.color),e.mainLine.style("opacity",e.initialOpacity.bind(e)).style("shape-rendering",function(t){return e.isStepType(t)?"crispEdges":""}).attr("transform",null),e.mainLine.exit().transition().duration(t).style("opacity",0).remove()},A.redrawLine=function(t,e){return[(e?this.mainLine.transition(Math.random().toString()):this.mainLine).attr("d",t).style("stroke",this.color).style("opacity",1)]},A.generateDrawLine=function(t,e){var i=this,n=i.config,a=i.d3.svg.line(),r=i.generateGetLinePoints(t,e),o=e?i.getSubYScale:i.getYScale,s=function(t){return(e?i.subxx:i.xx).call(i,t)},c=function(t,e){return n.data_groups.length>0?r(t,e)[0][1]:o.call(i,t.id)(t.value)};return a=n.axis_rotated?a.x(c).y(s):a.x(s).y(c),n.line_connectNull||(a=a.defined(function(t){return null!=t.value})),function(t){var r,s=n.line_connectNull?i.filterRemoveNull(t.values):t.values,c=e?i.x:i.subX,d=o.call(i,t.id),l=0,u=0;return i.isLineType(t)?n.data_regions[t.id]?r=i.lineWithRegions(s,c,d,n.data_regions[t.id]):(i.isStepType(t)&&(s=i.convertValuesToStep(s)),r=a.interpolate(i.getInterpolate(t))(s)):(s[0]&&(l=c(s[0].x),u=d(s[0].value)),r=n.axis_rotated?"M "+u+" "+l:"M "+l+" "+u),r||"M 0 0"}},A.generateGetLinePoints=function(t,e){var i=this,n=i.config,a=t.__max__+1,r=i.getShapeX(0,a,t,!!e),o=i.getShapeY(!!e),s=i.getShapeOffset(i.isLineType,t,!!e),c=e?i.getSubYScale:i.getYScale;return function(t,e){var a=c.call(i,t.id)(0),d=s(t,e)||a,l=r(t),u=o(t);return n.axis_rotated&&(00?r(t,e)[0][1]:o.call(i,t.id)(i.getAreaBaseValue(t.id))},d=function(t,e){return n.data_groups.length>0?r(t,e)[1][1]:o.call(i,t.id)(t.value)};return a=n.axis_rotated?a.x0(c).x1(d).y(s):a.x(s).y0(n.area_above?0:c).y1(d),n.line_connectNull||(a=a.defined(function(t){return null!==t.value})),function(t){var e,r=n.line_connectNull?i.filterRemoveNull(t.values):t.values,o=0,s=0;return i.isAreaType(t)?(i.isStepType(t)&&(r=i.convertValuesToStep(r)),e=a.interpolate(i.getInterpolate(t))(r)):(r[0]&&(o=i.x(r[0].x),s=i.getYScale(t.id)(r[0].value)),e=n.axis_rotated?"M "+s+" "+o:"M "+o+" "+s),e||"M 0 0"}},A.getAreaBaseValue=function(){return 0},A.generateGetAreaPoints=function(t,e){var 
i=this,n=i.config,a=t.__max__+1,r=i.getShapeX(0,a,t,!!e),o=i.getShapeY(!!e),s=i.getShapeOffset(i.isAreaType,t,!!e),c=e?i.getSubYScale:i.getYScale;return function(t,e){var a=c.call(i,t.id)(0),d=s(t,e)||a,l=r(t),u=o(t);return n.axis_rotated&&(00?(t=i.getShapeIndices(i.isLineType),e=i.generateGetLinePoints(t),i.circleY=function(t,i){return e(t,i)[0][1]}):i.circleY=function(t){return i.getYScale(t.id)(t.value)}},A.getCircles=function(t,e){var i=this;return(e?i.main.selectAll("."+o.circles+i.getTargetSelectorSuffix(e)):i.main).selectAll("."+o.circle+(c(t)?"-"+t:""))},A.expandCircles=function(t,e,i){var n=this,a=n.pointExpandedR.bind(n);i&&n.unexpandCircles(),n.getCircles(t,e).classed(o.EXPANDED,!0).attr("r",a)},A.unexpandCircles=function(t){var e=this,i=e.pointR.bind(e);e.getCircles(t).filter(function(){return e.d3.select(this).classed(o.EXPANDED)}).classed(o.EXPANDED,!1).attr("r",i)},A.pointR=function(t){var e=this,i=e.config;return e.isStepType(t)?0:d(i.point_r)?i.point_r(t):i.point_r},A.pointExpandedR=function(t){var e=this,i=e.config;return i.point_focus_expand_enabled?i.point_focus_expand_r?i.point_focus_expand_r:1.75*e.pointR(t):e.pointR(t)},A.pointSelectR=function(t){var e=this,i=e.config;return d(i.point_select_r)?i.point_select_r(t):i.point_select_r?i.point_select_r:4*e.pointR(t)},A.isWithinCircle=function(t,e){var i=this.d3,n=i.mouse(t),a=i.select(t),r=+a.attr("cx"),o=+a.attr("cy");return Math.sqrt(Math.pow(r-n[0],2)+Math.pow(o-n[1],2))0?i:320/(t.hasType("gauge")&&!e.gauge_fullCircle?2:1)},A.getCurrentPaddingTop=function(){var t=this,e=t.config,i=c(e.padding_top)?e.padding_top:0;return t.title&&t.title.node()&&(i+=t.getTitlePadding()),i},A.getCurrentPaddingBottom=function(){var t=this.config;return c(t.padding_bottom)?t.padding_bottom:0},A.getCurrentPaddingLeft=function(t){var e=this,i=e.config;return c(i.padding_left)?i.padding_left:i.axis_rotated?i.axis_x_show?Math.max(p(e.getAxisWidthByAxisId("x",t)),40):1:!i.axis_y_show||i.axis_y_inner?e.axis.getYAxisLabelPosition().isOuter?30:1:p(e.getAxisWidthByAxisId("y",t))},A.getCurrentPaddingRight=function(){var t=this,e=t.config,i=t.isLegendRight?t.getLegendWidth()+20:0;return c(e.padding_right)?e.padding_right+1:e.axis_rotated?10+i:!e.axis_y2_show||e.axis_y2_inner?2+i+(t.axis.getY2AxisLabelPosition().isOuter?20:0):p(t.getAxisWidthByAxisId("y2"))+i},A.getParentRectValue=function(t){for(var e,i=this.selectChart.node();i&&"BODY"!==i.tagName;){try{e=i.getBoundingClientRect()[t]}catch(n){"width"===t&&(e=i.offsetWidth)}if(e)break;i=i.parentNode}return e},A.getParentWidth=function(){return this.getParentRectValue("width")},A.getParentHeight=function(){var t=this.selectChart.style("height");return t.indexOf("px")>0?+t.replace("px",""):0},A.getSvgLeft=function(t){var e=this,i=e.config,n=i.axis_rotated||!i.axis_rotated&&!i.axis_y_inner,a=i.axis_rotated?o.axisX:o.axisY,r=e.main.select("."+a).node(),s=r&&n?r.getBoundingClientRect():{right:0},c=e.selectChart.node().getBoundingClientRect(),d=e.hasArcType(),l=s.right-c.left-(d?0:e.getCurrentPaddingLeft(t));return l>0?l:0},A.getAxisWidthByAxisId=function(t,e){var i=this,n=i.axis.getLabelPositionById(t);return i.axis.getMaxTickWidth(t,e)+(n.isInner?20:40)},A.getHorizontalAxisHeight=function(t){var 
e=this,i=e.config,n=30;return"x"!==t||i.axis_x_show?"x"===t&&i.axis_x_height?i.axis_x_height:"y"!==t||i.axis_y_show?"y2"!==t||i.axis_y2_show?("x"===t&&!i.axis_rotated&&i.axis_x_tick_rotate&&(n=30+e.axis.getMaxTickWidth(t)*Math.cos(Math.PI*(90-i.axis_x_tick_rotate)/180)),"y"===t&&i.axis_rotated&&i.axis_y_tick_rotate&&(n=30+e.axis.getMaxTickWidth(t)*Math.cos(Math.PI*(90-i.axis_y_tick_rotate)/180)),n+(e.axis.getLabelPositionById(t).isInner?0:10)+("y2"===t?-10:0)):e.rotated_padding_top:!i.legend_show||e.isLegendRight||e.isLegendInset?1:10:8},A.getEventRectWidth=function(){return Math.max(0,this.xAxis.tickInterval())},A.initBrush=function(){var t=this,e=t.d3;t.brush=e.svg.brush().on("brush",function(){t.redrawForBrush()}),t.brush.update=function(){return t.context&&t.context.select("."+o.brush).call(this),this},t.brush.scale=function(e){return t.config.axis_rotated?this.y(e):this.x(e)}},A.initSubchart=function(){var t=this,e=t.config,i=t.context=t.svg.append("g").attr("transform",t.getTranslate("context")),n=e.subchart_show?"visible":"hidden";i.style("visibility",n),i.append("g").attr("clip-path",t.clipPathForSubchart).attr("class",o.chart),i.select("."+o.chart).append("g").attr("class",o.chartBars),i.select("."+o.chart).append("g").attr("class",o.chartLines),i.append("g").attr("clip-path",t.clipPath).attr("class",o.brush).call(t.brush),t.axes.subx=i.append("g").attr("class",o.axisX).attr("transform",t.getTranslate("subx")).attr("clip-path",e.axis_rotated?"":t.clipPathForXAxis).style("visibility",e.subchart_axis_x_show?n:"hidden")},A.updateTargetsForSubchart=function(t){var e,i=this,n=i.context,a=i.config,r=i.classChartBar.bind(i),s=i.classBars.bind(i),c=i.classChartLine.bind(i),d=i.classLines.bind(i),l=i.classAreas.bind(i);a.subchart_show&&(n.select("."+o.chartBars).selectAll("."+o.chartBar).data(t).attr("class",r).enter().append("g").style("opacity",0).attr("class",r).append("g").attr("class",s),(e=n.select("."+o.chartLines).selectAll("."+o.chartLine).data(t).attr("class",c).enter().append("g").style("opacity",0).attr("class",c)).append("g").attr("class",d),e.append("g").attr("class",l),n.selectAll("."+o.brush+" rect").attr(a.axis_rotated?"width":"height",a.axis_rotated?i.width2:i.height2))},A.updateBarForSubchart=function(t){var e=this;e.contextBar=e.context.selectAll("."+o.bars).selectAll("."+o.bar).data(e.barData.bind(e)),e.contextBar.enter().append("path").attr("class",e.classBar.bind(e)).style("stroke","none").style("fill",e.color),e.contextBar.style("opacity",e.initialOpacity.bind(e)),e.contextBar.exit().transition().duration(t).style("opacity",0).remove()},A.redrawBarForSubchart=function(t,e,i){(e?this.contextBar.transition(Math.random().toString()).duration(i):this.contextBar).attr("d",t).style("opacity",1)},A.updateLineForSubchart=function(t){var e=this;e.contextLine=e.context.selectAll("."+o.lines).selectAll("."+o.line).data(e.lineData.bind(e)),e.contextLine.enter().append("path").attr("class",e.classLine.bind(e)).style("stroke",e.color),e.contextLine.style("opacity",e.initialOpacity.bind(e)),e.contextLine.exit().transition().duration(t).style("opacity",0).remove()},A.redrawLineForSubchart=function(t,e,i){(e?this.contextLine.transition(Math.random().toString()).duration(i):this.contextLine).attr("d",t).style("opacity",1)},A.updateAreaForSubchart=function(t){var 
e=this,i=e.d3;e.contextArea=e.context.selectAll("."+o.areas).selectAll("."+o.area).data(e.lineData.bind(e)),e.contextArea.enter().append("path").attr("class",e.classArea.bind(e)).style("fill",e.color).style("opacity",function(){return e.orgAreaOpacity=+i.select(this).style("opacity"),0}),e.contextArea.style("opacity",0),e.contextArea.exit().transition().duration(t).style("opacity",0).remove()},A.redrawAreaForSubchart=function(t,e,i){(e?this.contextArea.transition(Math.random().toString()).duration(i):this.contextArea).attr("d",t).style("fill",this.color).style("opacity",this.orgAreaOpacity)},A.redrawSubchart=function(t,e,i,n,a,r,o){var s,c,d,l=this,u=l.d3,h=l.config;l.context.style("visibility",h.subchart_show?"visible":"hidden"),h.subchart_show&&(u.event&&"zoom"===u.event.type&&l.brush.extent(l.x.orgDomain()).update(),t&&(l.brush.empty()||l.brush.extent(l.x.orgDomain()).update(),s=l.generateDrawArea(a,!0),c=l.generateDrawBar(r,!0),d=l.generateDrawLine(o,!0),l.updateBarForSubchart(i),l.updateLineForSubchart(i),l.updateAreaForSubchart(i),l.redrawBarForSubchart(c,i,i),l.redrawLineForSubchart(d,i,i),l.redrawAreaForSubchart(s,i,i)))},A.redrawForBrush=function(){var t=this,e=t.x;t.redraw({withTransition:!1,withY:t.config.zoom_rescale,withSubchart:!1,withUpdateXDomain:!0,withDimension:!1}),t.config.subchart_onbrush.call(t.api,e.orgDomain())},A.transformContext=function(t,e){var i,n=this;e&&e.axisSubX?i=e.axisSubX:(i=n.context.select("."+o.axisX),t&&(i=i.transition())),n.context.attr("transform",n.getTranslate("context")),i.attr("transform",n.getTranslate("subx"))},A.getDefaultExtent=function(){var t=this,e=t.config,i=d(e.axis_x_extent)?e.axis_x_extent(t.getXDomain(t.data.targets)):e.axis_x_extent;return t.isTimeSeries()&&(i=[t.parseDate(i[0]),t.parseDate(i[1])]),i},A.initText=function(){var t=this;t.main.select("."+o.chart).append("g").attr("class",o.chartTexts),t.mainText=t.d3.selectAll([])},A.updateTargetsForText=function(t){var e=this,i=e.classChartText.bind(e),n=e.classTexts.bind(e),a=e.classFocus.bind(e);e.main.select("."+o.chartTexts).selectAll("."+o.chartText).data(t).attr("class",function(t){return i(t)+a(t)}).enter().append("g").attr("class",i).style("opacity",0).style("pointer-events","none").append("g").attr("class",n)},A.updateText=function(t){var e=this,i=e.config,n=e.barOrLineData.bind(e),a=e.classText.bind(e);e.mainText=e.main.selectAll("."+o.texts).selectAll("."+o.text).data(n),e.mainText.enter().append("text").attr("class",a).attr("text-anchor",function(t){return i.axis_rotated?t.value<0?"end":"start":"middle"}).style("stroke","none").style("fill",function(t){return e.color(t)}).style("fill-opacity",0),e.mainText.text(function(t,i,n){return e.dataLabelFormat(t.id)(t.value,t.id,i,n)}),e.mainText.exit().transition().duration(t).style("fill-opacity",0).remove()},A.redrawText=function(t,e,i,n){return[(n?this.mainText.transition():this.mainText).attr("x",t).attr("y",e).style("fill",this.color).style("fill-opacity",i?0:this.opacityForText.bind(this))]},A.getTextRect=function(t,e,i){var n,a=this.d3.select("body").append("div").classed("c3",!0),r=a.append("svg").style("visibility","hidden").style("position","fixed").style("top",0).style("left",0),o=this.d3.select(i).style("font");return r.selectAll(".dummy").data([t]).enter().append("text").classed(e||"",!0).style("font",o).text(t).each(function(){n=this.getBoundingClientRect()}),a.remove(),n},A.generateXYForText=function(t,e,i,n){var 
a=this,r=a.generateGetAreaPoints(t,!1),o=a.generateGetBarPoints(e,!1),s=a.generateGetLinePoints(i,!1),c=n?a.getXForText:a.getYForText;return function(t,e){var i=a.isAreaType(t)?r:a.isBarType(t)?o:s;return c.call(a,i(t,e),t,this)}},A.getXForText=function(t,e,i){var n,a,r=this,o=i.getBoundingClientRect();return r.config.axis_rotated?(a=r.isBarType(e)?4:6,n=t[2][1]+a*(e.value<0?-1:1)):n=r.hasType("bar")?(t[2][0]+t[0][0])/2:t[0][0],null===e.value&&(n>r.width?n=r.width-o.width:n<0&&(n=4)),n},A.getYForText=function(t,e,i){var n,a=this,r=i.getBoundingClientRect();return a.config.axis_rotated?n=(t[0][0]+t[2][0]+.6*r.height)/2:(n=t[2][1],e.value<0||0===e.value&&!a.hasPositiveValue?(n+=r.height,a.isBarType(e)&&a.isSafari()?n-=3:!a.isBarType(e)&&a.isChrome()&&(n+=3)):n+=a.isBarType(e)?-3:-6),null!==e.value||a.config.axis_rotated||(nthis.height&&(n=this.height-4)),n},A.initTitle=function(){var t=this;t.title=t.svg.append("text").text(t.config.title_text).attr("class",t.CLASS.title)},A.redrawTitle=function(){var t=this;t.title.attr("x",t.xForTitle.bind(t)).attr("y",t.yForTitle.bind(t))},A.xForTitle=function(){var t=this,e=t.config,i=e.title_position||"left";return i.indexOf("right")>=0?t.currentWidth-t.getTextRect(t.title.node().textContent,t.CLASS.title,t.title.node()).width-e.title_padding.right:i.indexOf("center")>=0?(t.currentWidth-t.getTextRect(t.title.node().textContent,t.CLASS.title,t.title.node()).width)/2:e.title_padding.left},A.yForTitle=function(){var t=this;return t.config.title_padding.top+t.getTextRect(t.title.node().textContent,t.CLASS.title,t.title.node()).height},A.getTitlePadding=function(){var t=this;return t.yForTitle()+t.config.title_padding.bottom},A.initTooltip=function(){var t,e=this,i=e.config;if(e.tooltip=e.selectChart.style("position","relative").append("div").attr("class",o.tooltipContainer).style("position","absolute").style("pointer-events","none").style("display","none"),i.tooltip_init_show){if(e.isTimeSeries()&&u(i.tooltip_init_x)){for(i.tooltip_init_x=e.parseDate(i.tooltip_init_x),t=0;t"+(o||0===o?""+o+"":"")),void 0!==(s=w(p(t[r].value,t[r].ratio,t[r].id,t[r].index,t))))){if(null===t[r].name)continue;c=w(g(t[r].name,t[r].ratio,t[r].id,t[r].index)),d=l.levelColor?l.levelColor(t[r].value):n(t[r].id),a+="",a+=""+c+"",a+=""+s+"",a+=""}return a+""},A.tooltipPosition=function(t,e,i,n){var a,r,o,s,c,d=this,l=d.config,u=d.d3,h=d.hasArcType(),g=u.mouse(n);return h?(r=(d.width-(d.isLegendRight?d.getLegendWidth():0))/2+g[0],s=d.height/2+g[1]+20):(a=d.getSvgLeft(!0),l.axis_rotated?(o=(r=a+g[0]+100)+e,c=d.currentWidth-d.getCurrentPaddingRight(),s=d.x(t[0].x)+20):(o=(r=a+d.getCurrentPaddingLeft(!0)+d.x(t[0].x)+20)+e,c=a+d.currentWidth-d.getCurrentPaddingRight(),s=g[1]+15),o>c&&(r-=o-c+20),s+i>d.currentHeight&&(s-=i+30)),s<0&&(s=0),{top:s,left:r}},A.showTooltip=function(t,e){var i,n,a,r=this,o=r.config,s=r.hasArcType(),d=t.filter(function(t){return t&&c(t.value)}),l=o.tooltip_position||A.tooltipPosition;0!==d.length&&o.tooltip_show&&(r.tooltip.html(o.tooltip_contents.call(r,t,r.axis.getXAxisTickFormat(),r.getYFormat(s),r.color)).style("display","block"),i=r.tooltip.property("offsetWidth"),n=r.tooltip.property("offsetHeight"),a=l.call(this,d,i,n,e),r.tooltip.style("top",a.top+"px").style("left",a.left+"px"))},A.hideTooltip=function(){this.tooltip.style("display","none")},A.setTargetType=function(t,e){var i=this,n=i.config;i.mapToTargetIds(t).forEach(function(t){i.withoutFadeIn[t]=e===n.data_types[t],n.data_types[t]=e}),t||(n.data_type=e)},A.hasType=function(t,e){var 
i=this,n=i.config.data_types,a=!1;return e=e||i.data.targets,e&&e.length?e.forEach(function(e){var i=n[e.id];(i&&i.indexOf(t)>=0||!i&&"line"===t)&&(a=!0)}):Object.keys(n).length?Object.keys(n).forEach(function(e){n[e]===t&&(a=!0)}):a=i.config.data_type===t,a},A.hasArcType=function(t){return this.hasType("pie",t)||this.hasType("donut",t)||this.hasType("gauge",t)},A.isLineType=function(t){var e=this.config,i=u(t)?t:t.id;return!e.data_types[i]||["line","spline","area","area-spline","step","area-step"].indexOf(e.data_types[i])>=0},A.isStepType=function(t){var e=u(t)?t:t.id;return["step","area-step"].indexOf(this.config.data_types[e])>=0},A.isSplineType=function(t){var e=u(t)?t:t.id;return["spline","area-spline"].indexOf(this.config.data_types[e])>=0},A.isAreaType=function(t){var e=u(t)?t:t.id;return["area","area-spline","area-step"].indexOf(this.config.data_types[e])>=0},A.isBarType=function(t){var e=u(t)?t:t.id;return"bar"===this.config.data_types[e]},A.isScatterType=function(t){var e=u(t)?t:t.id;return"scatter"===this.config.data_types[e]},A.isPieType=function(t){var e=u(t)?t:t.id;return"pie"===this.config.data_types[e]},A.isGaugeType=function(t){var e=u(t)?t:t.id;return"gauge"===this.config.data_types[e]},A.isDonutType=function(t){var e=u(t)?t:t.id;return"donut"===this.config.data_types[e]},A.isArcType=function(t){return this.isPieType(t)||this.isDonutType(t)||this.isGaugeType(t)},A.lineData=function(t){return this.isLineType(t)?[t]:[]},A.arcData=function(t){return this.isArcType(t.data)?[t]:[]},A.barData=function(t){return this.isBarType(t)?t.values:[]},A.lineOrScatterData=function(t){return this.isLineType(t)||this.isScatterType(t)?t.values:[]},A.barOrLineData=function(t){return this.isBarType(t)||this.isLineType(t)?t.values:[]},A.isInterpolationType=function(t){return["linear","linear-closed","basis","basis-open","basis-closed","bundle","cardinal","cardinal-open","cardinal-closed","monotone"].indexOf(t)>=0},A.isSafari=function(){var t=window.navigator.userAgent;return t.indexOf("Safari")>=0&&t.indexOf("Chrome")<0},A.isChrome=function(){return window.navigator.userAgent.indexOf("Chrome")>=0},A.initZoom=function(){var t,e=this,i=e.d3,n=e.config;e.zoom=i.behavior.zoom().on("zoomstart",function(){t=i.event.sourceEvent,e.zoom.altDomain=i.event.sourceEvent.altKey?e.x.orgDomain():null,n.zoom_onzoomstart.call(e.api,i.event.sourceEvent)}).on("zoom",function(){e.redrawForZoom.call(e)}).on("zoomend",function(){var a=i.event.sourceEvent;a&&t.clientX===a.clientX&&t.clientY===a.clientY||(e.redrawEventRect(),e.updateZoom(),n.zoom_onzoomend.call(e.api,e.x.orgDomain()))}),e.zoom.scale=function(t){return n.axis_rotated?this.y(t):this.x(t)},e.zoom.orgScaleExtent=function(){var t=n.zoom_extent?n.zoom_extent:[1,10];return[t[0],Math.max(e.getMaxDataCount()/t[1],t[1])]},e.zoom.updateScaleExtent=function(){var t=_(e.x.orgDomain())/_(e.getZoomDomain()),i=this.orgScaleExtent();return this.scaleExtent([i[0]*t,i[1]*t]),this}},A.getZoomDomain=function(){var t=this,e=t.config,i=t.d3;return[i.min([t.orgXDomain[0],e.zoom_x_min]),i.max([t.orgXDomain[1],e.zoom_x_max])]},A.updateZoom=function(){var t=this,e=t.config.zoom_enabled?t.zoom:function(){};t.main.select("."+o.zoomRect).call(e).on("dblclick.zoom",null),t.main.selectAll("."+o.eventRect).call(e).on("dblclick.zoom",null)},A.redrawForZoom=function(){var t=this,e=t.d3,i=t.config,n=t.zoom,a=t.x;if(i.zoom_enabled&&0!==t.filterTargetsToShow(t.data.targets).length){if("mousemove"===e.event.sourceEvent.type&&n.altDomain)return a.domain(n.altDomain),void 
n.scale(a).updateScaleExtent();t.isCategorized()&&a.orgDomain()[0]===t.orgXDomain[0]&&a.domain([t.orgXDomain[0]-1e-10,a.orgDomain()[1]]),t.redraw({withTransition:!1,withY:i.zoom_rescale,withSubchart:!1,withEventRect:!1,withDimension:!1}),"mousemove"===e.event.sourceEvent.type&&(t.cancelClick=!0),i.zoom_onzoom.call(t.api,a.orgDomain())}},T}); diff --git a/ui/js/coffee/account.coffee b/ui/js/coffee/account.coffee index d4b84d42..227e8c4d 100644 --- a/ui/js/coffee/account.coffee +++ b/ui/js/coffee/account.coffee @@ -47,7 +47,7 @@ validateSignup = (json, state) -> document.getElementById('signupmsg').innerHTML = "Account created! Please check your inbox for verification instructions." else document.getElementById('signupmsg').innerHTML = "
Error: " + json.message + "
" - + login = (form) -> if form.email.value.length > 5 and form.password.value.length > 0 cog(document.getElementById('loginmsg')) @@ -94,10 +94,9 @@ getResetToken = (json, state) -> btn = mk('input', { type: 'button', onclick: 'doResetPass()', value: 'Reset your password'}) form.setAttribute("onsubmit", "return doResetPass();") app(form, btn) - + resetpw = () -> email = get('email').value remail = email post('account',{ reset: email } , null, getResetToken) return false - \ No newline at end of file diff --git a/ui/js/coffee/charts_gauge.coffee b/ui/js/coffee/charts_gauge.coffee index 0ce2ea28..44dae448 100644 --- a/ui/js/coffee/charts_gauge.coffee +++ b/ui/js/coffee/charts_gauge.coffee @@ -1,7 +1,7 @@ charts_gaugechart = (obj, data) -> if data.gauge data = data.gauge - + config = { bindto: obj, data: { @@ -29,12 +29,11 @@ charts_gaugechart = (obj, data) -> gauge = (json, state) -> - + lmain = new HTML('div') state.widget.inject(lmain, true) - + if json.gauge and json.gauge.text lmain.inject(new HTML('p', {}, json.gauge.text)) - - gaugeChart = new Chart(lmain, 'gauge', json) + gaugeChart = new Chart(lmain, 'gauge', json) diff --git a/ui/js/coffee/charts_linechart.coffee b/ui/js/coffee/charts_linechart.coffee index 441a3a23..76bc7607 100644 --- a/ui/js/coffee/charts_linechart.coffee +++ b/ui/js/coffee/charts_linechart.coffee @@ -73,7 +73,7 @@ charts_linechart = (obj, data, options) -> keys = data.sortOrder else keys = Object.keys(xts) - + for key in keys val = xts[key] xx = [key] diff --git a/ui/js/coffee/charts_linked_map.coffee b/ui/js/coffee/charts_linked_map.coffee index 05a10d00..503ed562 100644 --- a/ui/js/coffee/charts_linked_map.coffee +++ b/ui/js/coffee/charts_linked_map.coffee @@ -11,27 +11,27 @@ charts_linked = (obj, nodes, links, options) -> bb = obj.getBoundingClientRect() llwidth = bb.width llheight = Math.max(600, bb.height) - - tooltip = d3.select("body").append("div") - .attr("class", "link_tooltip") + + tooltip = d3.select("body").append("div") + .attr("class", "link_tooltip") .style("opacity", 0); - + avg = links.length / nodes.length - + force = d3.layout.force() .gravity(0.015) .distance(llheight/8) .charge(-200/Math.log10(nodes.length)) .linkStrength(0.2/avg) .size([llwidth, llheight]) - + edges = [] links.forEach((e) -> sourceNode = nodes.filter((n) => n.id == e.source)[0] targetNode = nodes.filter((n) => n.id == e.target)[0] edges.push({source: sourceNode, target: targetNode, s: e.source, value: e.value, name: e.name, tooltip: e.tooltip}); ) - + force .nodes(nodes) .links(edges) @@ -39,7 +39,7 @@ charts_linked = (obj, nodes, links, options) -> lcolors = {} nodes.forEach((e) -> lcolors[e.id] = licolors[lla++] - + ) lla = 0 link = g.selectAll(".link") @@ -53,21 +53,21 @@ charts_linked = (obj, nodes, links, options) -> "stroke-width: #{d.value}; stroke: #{lcolors[d.s]};" ).on("mouseover", (d) -> if d.tooltip - tooltip.transition() - .duration(100) - .style("opacity", .9); + tooltip.transition() + .duration(100) + .style("opacity", .9); tooltip.html("#{d.name}:
" + d.tooltip.replace("\n", "
")) - .style("left", (d3.event.pageX + 20) + "px") - .style("top", (d3.event.pageY - 28) + "px"); + .style("left", (d3.event.pageX + 20) + "px") + .style("top", (d3.event.pageY - 28) + "px"); ) .on("mouseout", (d) -> d3.select(this).style("stroke-opacity", "0.375") - tooltip.transition() - .duration(200) - .style("opacity", 0); + tooltip.transition() + .duration(200) + .style("opacity", 0); ) - + defs = svg.append("defs") nodes.forEach( (n) -> if n.gravatar @@ -86,27 +86,27 @@ charts_linked = (obj, nodes, links, options) -> .attr("xlink:href", "https://secure.gravatar.com/avatar/#{n.gravatar}.png?d=identicon") else n.gravatar = false - ) - + ) + node = g.selectAll(".node") .data(nodes) .enter().append("g") .attr("class", "link_node") .attr("data-source", (d) => d.id) .call(force.drag); - + lTargets = [] - + gatherTargets = (d, e) -> if e.source == d or e.target == d lTargets.push(e.source.id) lTargets.push(e.target.id) return true return false - + uptop = svg.append("g") x = null - + node.append("circle") .attr("class", "link_node") .attr("data-source", (d) => d.id) @@ -127,7 +127,7 @@ charts_linked = (obj, nodes, links, options) -> d3.selectAll("path").style("stroke-opacity", "0.075") d3.selectAll("path").filter((e) => gatherTargets(d,e) ).style("stroke-opacity", "1").style("z-index", "20") d3.selectAll("path").filter((e) => e.source == d or e.target).each((o) => - + x = d3.select(this).insert("g", ":first-child").style("stroke", "red !important") x.append("use").attr("xlink:href", "#" + o.name) ) @@ -142,32 +142,32 @@ charts_linked = (obj, nodes, links, options) -> d3.selectAll("text").style("opacity", null) d3.selectAll("path").style("stroke-opacity", null) ) - - + + node.append("a") .attr("href", (d) => if not d.gravatar then "#" else "contributors.html?page=biography&email=#{d.id}") .append("text") .attr("dx", 13) .attr("dy", ".35em") - .text((d) => d.name) + .text((d) => d.name) .on("mouseover", (d) -> if d.tooltip - tooltip.transition() - .duration(100) - .style("opacity", .9); + tooltip.transition() + .duration(100) + .style("opacity", .9); tooltip.html("#{d.name}:
" + d.tooltip.replace("\n", "
")) - .style("left", (d3.event.pageX + 20) + "px") - .style("top", (d3.event.pageY - 28) + "px"); + .style("left", (d3.event.pageX + 20) + "px") + .style("top", (d3.event.pageY - 28) + "px"); ) .on("mouseout", (d) -> #d3.selectAll(".link").filter( (e) => e.source == this.id ).style("stroke-opacity", "0.375") - tooltip.transition() - .duration(200) - .style("opacity", 0); + tooltip.transition() + .duration(200) + .style("opacity", 0); ) - + force.on("tick", () -> link.attr("d", (d) -> dx = d.target.x - d.source.x @@ -192,7 +192,7 @@ charts_linked = (obj, nodes, links, options) -> svg .call( d3.behavior.zoom().center([llwidth / 2, llheight / 2]).scaleExtent([0.333, 4]).on("zoom", linked_zoom) ) - + return [ { svg: svg, @@ -215,5 +215,3 @@ charts_linked = (obj, nodes, links, options) -> .start() }, {linked: true}] - - diff --git a/ui/js/coffee/charts_punchcard.coffee b/ui/js/coffee/charts_punchcard.coffee index 8c07ecf4..77508f49 100644 --- a/ui/js/coffee/charts_punchcard.coffee +++ b/ui/js/coffee/charts_punchcard.coffee @@ -36,12 +36,12 @@ charts_punchcard = (obj, data, options) -> div = d3.select(div) data = data.timeseries days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - + c = [] chart = d3.select(obj).append("svg").attr("width", '100%').attr("height", '100%') - - + + MAX = 0 for k, v of data m = k.split(/ - /) @@ -59,8 +59,8 @@ charts_punchcard = (obj, data, options) -> circles = chart.selectAll('svg').data(c).enter().append("circle"); labels = chart.selectAll('svg').data(days).enter().append('text') slots = chart.selectAll('svg').data([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]).enter().append('text') - - + + redraw = () -> xy = obj.getBoundingClientRect() xy.height = xy.width * 0.5 @@ -69,16 +69,16 @@ charts_punchcard = (obj, data, options) -> maxr = Math.sqrt(xy.width**2 + xy.height**2) / 80 cw = (0.03*xy.width) circles.attr("cx", (d) => (d.x*xy.width) + cw/2).attr("cy", (d) => 50 + d.y*xy.height ).attr("r", (d) => pval(d.r, MAX) * maxr).style("fill", (d) => punchcard_color(d.r, MAX)). 
- on("mouseover", (d) -> + on("mouseover", (d) -> div.transition() - .duration(200) + .duration(200) .style("opacity", .9) - div .html(d.h + d.r.pretty() + " commits") - .style("left", (d3.event.pageX) + "px") + div .html(d.h + d.r.pretty() + " commits") + .style("left", (d3.event.pageX) + "px") .style("top", (d3.event.pageY - 28) + "px"); ).on("mouseout", (d) -> div.transition() - .duration(200) + .duration(200) .style("opacity", 0) ) labels.attr('x', 20).attr('y', (d) => (55 + (0.04 + days.indexOf(d) * 0.10) * xy.height)).attr('font-size', maxr*1.75).text((d) => d) @@ -86,6 +86,5 @@ charts_punchcard = (obj, data, options) -> chart.node().addEventListener("resize", redraw) window.addEventListener("resize", redraw) redraw(); - - return [chart, {punchcard: true}] + return [chart, {punchcard: true}] diff --git a/ui/js/coffee/charts_radar.coffee b/ui/js/coffee/charts_radar.coffee index e2cc01bb..831c1796 100644 --- a/ui/js/coffee/charts_radar.coffee +++ b/ui/js/coffee/charts_radar.coffee @@ -34,7 +34,7 @@ charts_radarchart = (obj, data, options) -> Format = (edge) => Math.floor((edge/24)+0.5) + "↑ (" + (5**(edge/24)).pretty() + ")" d3.select(obj).select("svg").remove(); - + rect = obj.getBoundingClientRect() g = d3.select(obj) .append("svg") @@ -43,7 +43,7 @@ charts_radarchart = (obj, data, options) -> .append("g") .attr("transform", "translate(" + cfg.TranslateX + "," + cfg.TranslateY + ")"); ; - + # Indicator lines for j in [0...cfg.levels] levelFactor = cfg.factor * radius * ( (j+1) / cfg.levels) @@ -60,8 +60,8 @@ charts_radarchart = (obj, data, options) -> .style("stroke-opacity", "0.75") .style("stroke-width", "0.3px") .attr("transform", "translate(" + (cfg.w/2-levelFactor) + ", " + (cfg.h/2-levelFactor) + ")") - - + + # Levels for j in [0...cfg.levels] levelFactor = cfg.factor*radius*((j+1)/cfg.levels); @@ -77,16 +77,16 @@ charts_radarchart = (obj, data, options) -> .attr("transform", "translate(" + (cfg.w/2-levelFactor + cfg.ToRight) + ", " + (cfg.h/2-levelFactor) + ")") .attr("fill", "#737373") .text(Format((j+1)*cfg.maxValue/cfg.levels)) - - + + series = 0 - + axis = g.selectAll(".axis") .data(axes) .enter() .append("g") .attr("class", "axis") - + axis.append("line") .attr("x1", cfg.w/2) .attr("y1", cfg.h/2) @@ -95,7 +95,7 @@ charts_radarchart = (obj, data, options) -> .attr("class", "line") .style("stroke", "grey") .style("stroke-width", "1px"); - + axis.append("text") .attr("class", "legend") .text((d) => d) @@ -106,8 +106,8 @@ charts_radarchart = (obj, data, options) -> .attr("transform", (d,i) => "translate(0, -10)") .attr("x", (d,i) => cfg.w/2*(1-cfg.factorLegend*Math.sin(i*cfg.radians/total))-60*Math.sin(i*cfg.radians/total)) .attr("y", (d,i) => cfg.h/2*(1-Math.cos(i*cfg.radians/total))-20*Math.cos(i*cfg.radians/total)) - - + + d.forEach((y,x) -> dataValues = [] g.selectAll(".nodes") @@ -120,9 +120,9 @@ charts_radarchart = (obj, data, options) -> ]\ ) ) - + dataValues.push(dataValues[0]) - + g.selectAll(".area") .data([dataValues]) .enter() @@ -143,7 +143,7 @@ charts_radarchart = (obj, data, options) -> z = "polygon."+d3.select(this).attr("class"); g.selectAll("polygon") .transition(200) - .style("fill-opacity", 0.1); + .style("fill-opacity", 0.1); g.selectAll(z) .transition(200) .style("fill-opacity", .7); @@ -155,9 +155,9 @@ charts_radarchart = (obj, data, options) -> ) series++ ) - + series = 0 - + d.forEach( (y,x) -> g.selectAll(".nodes") .data(y).enter() @@ -168,7 +168,7 @@ charts_radarchart = (obj, data, options) -> .attr("cx", (j,i) -> dataValues = dataValues || [] 
dataValues.push([ - cfg.w/2*(1-(parseFloat(Math.max(j.value, 0))/cfg.maxValue)*cfg.factor*Math.sin(i*cfg.radians/total)), + cfg.w/2*(1-(parseFloat(Math.max(j.value, 0))/cfg.maxValue)*cfg.factor*Math.sin(i*cfg.radians/total)), cfg.h/2*(1-(parseFloat(Math.max(j.value, 0))/cfg.maxValue)*cfg.factor*Math.cos(i*cfg.radians/total)) ]) return cfg.w/2*(1-(Math.max(j.value, 0)/cfg.maxValue)*cfg.factor*Math.sin(i*cfg.radians/total)) @@ -181,14 +181,14 @@ charts_radarchart = (obj, data, options) -> .on('mouseover', (d) -> newX = parseFloat(d3.select(this).attr('cx')) - 10 newY = parseFloat(d3.select(this).attr('cy')) - 5 - + tooltip .attr('x', newX) .attr('y', newY) .text(Format(d.value)) .transition(200) .style('opacity', 1) - + z = "polygon."+d3.select(this).attr("class"); g.selectAll("polygon") .transition(200) @@ -207,23 +207,23 @@ charts_radarchart = (obj, data, options) -> ) .append("svg:title") .text((j) => Math.max(j.value, 0)); - + series++ ); - + # Tooltip tooltip = g.append('text') .style('opacity', 0) .style('font-family', 'sans-serif') .style('font-size', '13px'); - + legend = g.append("g") .attr("class", "legend") .attr("height", 100) .attr("width", 200) - .attr('transform', 'translate(90,20)') - - + .attr('transform', 'translate(90,20)') + + legend.selectAll('rect') .data(LegendOptions) .enter() @@ -233,8 +233,8 @@ charts_radarchart = (obj, data, options) -> .attr("width", 10) .attr("height", 10) .style("fill", (d,i) => cfg.color[i]) - - + + legend.selectAll('text') .data(LegendOptions) .enter() @@ -244,7 +244,6 @@ charts_radarchart = (obj, data, options) -> .attr("font-size", "11px") .attr("fill", "#737373") .text((d) => d) - + g.resize = () -> return true return [g, {}] - diff --git a/ui/js/coffee/charts_wrapper.coffee b/ui/js/coffee/charts_wrapper.coffee index 91c84051..150f5ae9 100644 --- a/ui/js/coffee/charts_wrapper.coffee +++ b/ui/js/coffee/charts_wrapper.coffee @@ -117,7 +117,7 @@ copyCSS = (destination, source) -> if (child.tagName in containerElements) copyCSS(child, source.childNodes[cd]) continue - + style = source.childNodes[cd].currentStyle || window.getComputedStyle(source.childNodes[cd]); if (style == "undefined" || style == null) continue @@ -129,7 +129,7 @@ downloadBlob = (name, uri) -> navigator.msSaveOrOpenBlob(uriToBlob(uri), name); else saveLink = document.createElement('a'); - + saveLink.download = name; saveLink.style.display = 'none'; document.body.appendChild(saveLink); @@ -141,26 +141,26 @@ downloadBlob = (name, uri) -> requestAnimationFrame( () -> URL.revokeObjectURL(url) ) - + catch e console.warn('This browser does not support object URLs. 
Falling back to string URL.'); saveLink.href = uri; - + saveLink.click() document.body.removeChild(saveLink) - - + + chartToSvg = (o, asSVG) -> - + doctype = '' svgdiv = o.chartdiv.getElementsByTagName('svg')[0] svgcopy = svgdiv.cloneNode(true) copyCSS(svgcopy, svgdiv) rect = o.main.getBoundingClientRect() svgcopy.setAttribute('xlink', 'http://www.w3.org/1999/xlink') - + source = (new XMLSerializer()).serializeToString(svgcopy) - + source = source.replace(/(\w+)?:?xlink=/g, 'xmlns:xlink=') source = source.replace(/NS\d+:href/g, 'xlink:href') blob = new Blob([ doctype + source], { type: 'image/svg+xml;charset=utf-8' }) @@ -174,12 +174,12 @@ chartToSvg = (o, asSVG) -> document.getElementById('chartWrapperHiddenMaster').appendChild(canvas) ctx = canvas.getContext('2d') ctx.drawImage(img, 0, 0) - + canvasUrl = canvas.toDataURL("image/png") downloadBlob('chart.png', canvasUrl) - + document.getElementById('chartWrapperHiddenMaster').appendChild(img) - + rotateTable = (list) -> newList = [] for x, i in list[0] @@ -188,7 +188,7 @@ rotateTable = (list) -> arr.push(el[i]) newList.push(arr) return newList - + dataTable = (o) -> modal = new HTML('div', { class: "chartModal"}) modalInner = new HTML('div', { class: "chartModalContent"}) @@ -223,7 +223,7 @@ switchChartType = (o, config, type) -> xtype = m[1] + v.split(/-/)[1]||v config.data.types[k] = xtype o.chartobj = c3.generate(config) - + stackChart = (o, config, chart) -> arr = [] for k, v of config.data.columns @@ -240,33 +240,33 @@ class Chart constructor: (parent, type, data, options) -> cid = parseInt((Math.random()*1000000)).toString(16) @cid = cid - + xxCharts[cid] = this - + # Make main div wrapper @main = new HTML('div', { class: "chartWrapper"}) @main.xThis = this @data = data - + # Make toolbar @toolbar = new HTML('div', {class: "chartToolbar"}) @main.inject(@toolbar) - + # Title bar @titlebar = new HTML('div', {class: "chartTitle"}, if (options and options.title) then options.title else "") @main.inject(@titlebar) - + i = 0 chartWrapperColors = genColors(16, 0.2, 0.75, true) - + # Default to generic buttons btns = chartWrapperButtons.generic.slice(0,999) - + # Line charts have more features than, say, donuts if type == 'line' for el in chartWrapperButtons.line btns.push(el) - + # Make the buttons appear @buttons = {} for btn in btns @@ -279,13 +279,13 @@ class Chart if btn.onclick do (btn, btnDiv) -> btnDiv.addEventListener('click', () -> chartOnclick(btn.onclick, cid)) - + i++ - + # Make inner chart @chartdiv = new HTML('div', { class: "chartChart"}) @main.inject(@chartdiv) - + if parent parent.appendChild(@main) else @@ -294,7 +294,7 @@ class Chart hObj = new HTML('div', { id: 'chartWrapperHiddenMaster', style: { visibility: "hidden"}}) document.body.appendChild(hObj) hObj.appendChild(@main) - + if type == 'line' [@chartobj, @config] = charts_linechart(@chartdiv, data, options) if type == 'donut' @@ -307,8 +307,8 @@ class Chart [@chartobj, @config] = charts_linked(@chartdiv, data.nodes, data.links, options) if type == 'punchcard' [@chartobj, @config] = charts_punchcard(@chartdiv, data, options) - - + + # If this data source has distinguishable categories # show a checkbox to toggle it. 
if data.distinguishable @@ -324,7 +324,7 @@ class Chart if this.checked distinguish = 'true' globArgs['distinguish'] = 'true' - + updateWidgets('line', null, { distinguish: distinguish }) updateWidgets('gauge', null, { distinguish: distinguish }) ) @@ -337,7 +337,7 @@ class Chart label.style.paddingLeft = '5px' label.appendChild(document.createTextNode('Toggle category breakdown')) @main.inject(label) - + # If this data source has relative weightings # show a checkbox to toggle it. if data.relativeMode @@ -353,7 +353,7 @@ class Chart if this.checked relative = 'true' globArgs['relative'] = 'true' - + updateWidgets('line', null, { relative: relative }) updateWidgets('gauge', null, { relative: relative }) ) @@ -366,6 +366,5 @@ class Chart label.style.paddingLeft = '5px' label.appendChild(document.createTextNode('Toggle relative/comparative mode')) @main.inject(label) - + return @main - \ No newline at end of file diff --git a/ui/js/coffee/colors.coffee b/ui/js/coffee/colors.coffee index 11f9c487..07237d2a 100644 --- a/ui/js/coffee/colors.coffee +++ b/ui/js/coffee/colors.coffee @@ -1,6 +1,6 @@ hsl2rgb = (h, s, l) -> - + h = h % 1; s = 1 if s > 1 l = 1 if l > 1 @@ -79,7 +79,7 @@ genColors = (numColors, saturation, lightness, hex) -> if baseHue < 0 baseHue += 1 c = hsl2rgb(baseHue, saturation, lightness) - if (hex) + if (hex) #h = ( Math.round(c.r*255*255*255) + Math.round(c.g * 255*255) + Math.round(c.b*255) ).toString(16) h = "#" + ("00" + (~ ~(c.r * 255)).toString(16)).slice(-2) + ("00" + (~ ~(c.g * 255)).toString(16)).slice(-2) + ("00" + (~ ~(c.b * 255)).toString(16)).slice(-2); cls.push(h); @@ -90,9 +90,9 @@ genColors = (numColors, saturation, lightness, hex) -> b: parseInt(c.b * 255) }) baseHue -= 0.37 - if (baseHue < 0) + if (baseHue < 0) baseHue += 1 - + return cls @@ -103,12 +103,12 @@ quickColors = (num) -> r = Math.random() g = Math.random() b = Math.random() - + pastel = 0.7 r = ((pastel+r)/2) g = ((pastel+g)/2) b = ((pastel+b)/2) - + c = "#" + ("00" + (~ ~(r * 205)).toString(16)).slice(-2) + ("00" + (~ ~(g * 205)).toString(16)).slice(-2) + ("00" + (~ ~(b * 205)).toString(16)).slice(-2); colors.push(c) return colors diff --git a/ui/js/coffee/combine.sh b/ui/js/coffee/combine.sh index 6d73e941..4c3a9698 100644 --- a/ui/js/coffee/combine.sh +++ b/ui/js/coffee/combine.sh @@ -1,3 +1,2 @@ #!/bin/bash coffee -b --join ../kibble.v1.js -c *.coffee - diff --git a/ui/js/coffee/datepicker.coffee b/ui/js/coffee/datepicker.coffee index 5e3c7b84..7ed32019 100644 --- a/ui/js/coffee/datepicker.coffee +++ b/ui/js/coffee/datepicker.coffee @@ -39,7 +39,7 @@ updateTimeseriesWidgets = (range) -> updateWidgets('phonebook', null, { to: to, from: from}) updateWidgets('worldmap', null, { to: to, from: from}) updateWidgets('jsondump', null, { to: to, from: from}) - + datepicker = (widget) -> div = document.createElement('div') div.setAttribute("class", "well") @@ -72,10 +72,10 @@ datepicker = (widget) -> id = Math.floor(Math.random()*987654321).toString(16) input.setAttribute("id", id) group.appendChild(input) - + widget.inject(div) - - + + datePickerOptions = { startDate: if globArgs.from then moment(new Date(globArgs.from*1000)) else moment().subtract(6, 'months'), endDate: if globArgs.to then moment(new Date(globArgs.to*1000)) else moment(), @@ -118,9 +118,8 @@ datepicker = (widget) -> firstDay: 1 } }; - + $('#' + id).daterangepicker(datePickerOptions, (start, end, label) -> console.log(start._d.getTime()/1000) updateTimeseriesWidgets([Math.max(0, Math.floor(start._d.getTime()/1000)), Math.max(3600, 
Math.floor(end._d.getTime()/1000))]) ); - diff --git a/ui/js/coffee/error_modal.coffee b/ui/js/coffee/error_modal.coffee index 011263a2..818ea24a 100644 --- a/ui/js/coffee/error_modal.coffee +++ b/ui/js/coffee/error_modal.coffee @@ -8,10 +8,9 @@ badModal = (str) -> modalInner.inject(btndiv) btn = new HTML('button', {class: "btn btn-lg btn-success", onclick:"document.body.removeChild(this.parentNode.parentNode.parentNode);"}, "Gotcha!") btndiv.inject(btn) - + window.setTimeout(() -> modalInner.style.visibility = "visible" modalInner.style.opacity = 1 , 10 ) - diff --git a/ui/js/coffee/explorer.coffee b/ui/js/coffee/explorer.coffee index 8bff59d9..7cfe148f 100644 --- a/ui/js/coffee/explorer.coffee +++ b/ui/js/coffee/explorer.coffee @@ -14,7 +14,7 @@ # limitations under the License. explorer = (json, state) -> - + org = json.organisation h = document.createElement('h2') if json.tag @@ -46,12 +46,12 @@ explorer = (json, state) -> if globArgs.source and globArgs.source == item.sourceID opt.selected = 'selected' list.appendChild(opt) - + ID = Math.floor(Math.random() * 987654321).toString(16) list.setAttribute('id', ID) $("#"+ID).chosen().change(() -> source = this.value - + if source == "" source = null globArgs.source = source @@ -67,9 +67,9 @@ explorer = (json, state) -> updateWidgets('punchcard', null, { source: source }) updateWidgets('jsondump', null, { source: source }) ) - - - + + + # Unique commits label id = Math.floor(Math.random() * 987654321).toString(16) chk = document.createElement('input') @@ -83,7 +83,7 @@ explorer = (json, state) -> if this.checked author = 'true' globArgs['author'] = 'true' - + updateWidgets('donut', null, { author: author }) updateWidgets('gauge', null, { author: author }) updateWidgets('line', null, { author: author }) @@ -107,10 +107,10 @@ explorer = (json, state) -> state.widget.inject(label) br = new HTML('br') p = new HTML('input', {id:'pathfilter', size: 32, type: 'text', value: globArgs.pathfilter, onChange: 'pathFilterGlob = this.value;',placeholder: 'optional path-filter'}) - + state.widget.inject(br) state.widget.inject(p) - + b = new HTML('input', {style: { marginLeft: '10px'}, class: 'btn btn-small btn-success', type: 'button', onClick: 'pathFilter();', value: "filter paths"}) rb = new HTML('input', {style: { marginLeft: '10px'}, class: 'btn btn-small btn-danger', type: 'button', onClick: 'get("pathfilter").value = ""; pathFilterGlob = ""; pathFilter();', value: "reset"}) state.widget.inject(b) @@ -118,7 +118,7 @@ explorer = (json, state) -> sourceexplorer = (json, state) -> - + org = json.organisation h = document.createElement('h4') if json.tag @@ -151,12 +151,12 @@ sourceexplorer = (json, state) -> if globArgs.source and globArgs.source == item.sourceID opt.selected = 'selected' list.appendChild(opt) - + ID = Math.floor(Math.random() * 987654321).toString(16) list.setAttribute('id', ID) $("#"+ID).chosen().change(() -> source = this.value - + if source == "" source = null globArgs.source = source @@ -176,13 +176,13 @@ sourceexplorer = (json, state) -> mailexplorer = (json, state) -> - + org = json.organisation h = document.createElement('h4') if json.tag org.name += " (Filter: " + json.tag + ")" h.appendChild(document.createTextNode("Exploring " + org.name + ":")) - + state.widget.inject(h, true) list = document.createElement('select') state.widget.inject(list) @@ -209,12 +209,12 @@ mailexplorer = (json, state) -> if globArgs.source and globArgs.source == item.sourceID opt.selected = 'selected' list.appendChild(opt) - + ID = 
Math.floor(Math.random() * 987654321).toString(16) list.setAttribute('id', ID) $("#"+ID).chosen().change(() -> source = this.value - + if source == "" source = null globArgs.source = source @@ -227,17 +227,17 @@ mailexplorer = (json, state) -> updateWidgets('trends', null, { source: source }) updateWidgets('punchcard', null, { source: source }) updateWidgets('relationship', null, { source: source }) - + ) - + logexplorer = (json, state) -> - + org = json.organisation h = document.createElement('h4') if json.tag org.name += " (Filter: " + json.tag + ")" h.appendChild(document.createTextNode("Exploring " + org.name + ":")) - + state.widget.inject(h, true) list = document.createElement('select') state.widget.inject(list) @@ -264,12 +264,12 @@ logexplorer = (json, state) -> if globArgs.source and globArgs.source == item.sourceID opt.selected = 'selected' list.appendChild(opt) - + ID = Math.floor(Math.random() * 987654321).toString(16) list.setAttribute('id', ID) $("#"+ID).chosen().change(() -> source = this.value - + if source == "" source = null globArgs.source = source @@ -281,11 +281,11 @@ logexplorer = (json, state) -> updateWidgets('factors', null, { source: source }) updateWidgets('trends', null, { source: source }) updateWidgets('punchcard', null, { source: source }) - + ) - + issueexplorer = (json, state) -> - + org = json.organisation if json.tag org.name += " (Filter: " + json.tag + ")" @@ -310,7 +310,7 @@ issueexplorer = (json, state) -> opt = document.createElement('option') opt.value = item.sourceID ezURL = null - n = item.sourceURL.match(/^([a-z]+:\/\/.+?)\/([-.A-Z0-9]+)$/i) + n = item.sourceURL.match(/^([a-z]+:\/\/.+?)\/([-.A-Z0-9]+)$/i) m = item.sourceURL.match(/^([a-z]+:\/\/.+?)\s(.+)$/i) if n and n.length == 3 ezURL = "#{n[2]} - (#{n[1]})" @@ -320,12 +320,12 @@ issueexplorer = (json, state) -> if globArgs.source and globArgs.source == item.sourceID opt.selected = 'selected' list.appendChild(opt) - + ID = Math.floor(Math.random() * 987654321).toString(16) list.setAttribute('id', ID) $("#"+ID).chosen().change(() -> source = this.value - + if source == "" source = null globArgs.source = source @@ -337,13 +337,13 @@ issueexplorer = (json, state) -> updateWidgets('factors', null, { source: source }) updateWidgets('trends', null, { source: source }) updateWidgets('punchcard', null, { source: source }) - + ) - + forumexplorer = (json, state) -> - + org = json.organisation if json.tag org.name += " (Filter: " + json.tag + ")" @@ -368,7 +368,7 @@ forumexplorer = (json, state) -> opt = document.createElement('option') opt.value = item.sourceID ezURL = null - n = item.sourceURL.match(/^([a-z]+:\/\/.+?)\/([-.A-Z0-9]+)$/i) + n = item.sourceURL.match(/^([a-z]+:\/\/.+?)\/([-.A-Z0-9]+)$/i) m = item.sourceURL.match(/^([a-z]+:\/\/.+?)\s(.+)$/i) if n and n.length == 3 ezURL = "#{n[2]} - (#{n[1]})" @@ -378,12 +378,12 @@ forumexplorer = (json, state) -> if globArgs.source and globArgs.source == item.sourceID opt.selected = 'selected' list.appendChild(opt) - + ID = Math.floor(Math.random() * 987654321).toString(16) list.setAttribute('id', ID) $("#"+ID).chosen().change(() -> source = this.value - + if source == "" source = null globArgs.source = source @@ -395,13 +395,13 @@ forumexplorer = (json, state) -> updateWidgets('factors', null, { source: source }) updateWidgets('trends', null, { source: source }) updateWidgets('punchcard', null, { source: source }) - + ) - + imexplorer = (json, state) -> - + org = json.organisation if json.tag org.name += " (Filter: " + json.tag + ")" @@ -426,7 +426,7 @@ 
imexplorer = (json, state) -> opt = document.createElement('option') opt.value = item.sourceID ezURL = null - n = item.sourceURL.match(/^([a-z]+:\/\/.+?)\/([#\S+]+)$/i) + n = item.sourceURL.match(/^([a-z]+:\/\/.+?)\/([#\S+]+)$/i) m = item.sourceURL.match(/^([a-z]+:\/\/.+?)\s(.+)$/i) if n and n.length == 3 ezURL = "#{n[2]} - (#{n[1]})" @@ -436,12 +436,12 @@ imexplorer = (json, state) -> if globArgs.source and globArgs.source == item.sourceID opt.selected = 'selected' list.appendChild(opt) - + ID = Math.floor(Math.random() * 987654321).toString(16) list.setAttribute('id', ID) $("#"+ID).chosen().change(() -> source = this.value - + if source == "" source = null globArgs.source = source @@ -453,13 +453,13 @@ imexplorer = (json, state) -> updateWidgets('factors', null, { source: source }) updateWidgets('trends', null, { source: source }) updateWidgets('punchcard', null, { source: source }) - + , false) $('select').chosen(); - + ciexplorer = (json, state) -> - + org = json.organisation if json.tag org.name += " (Filter: " + json.tag + ")" @@ -484,7 +484,7 @@ ciexplorer = (json, state) -> opt = document.createElement('option') opt.value = item.sourceID ezURL = null - n = item.sourceURL.match(/^([a-z]+:\/\/.+?)\/([#\S+]+)$/i) + n = item.sourceURL.match(/^([a-z]+:\/\/.+?)\/([#\S+]+)$/i) m = item.sourceURL.match(/^([a-z]+:\/\/.+?)\s(.+)$/i) if n and n.length == 3 ezURL = "#{n[2]} - (#{n[1]})" @@ -494,12 +494,12 @@ ciexplorer = (json, state) -> if globArgs.source and globArgs.source == item.sourceID opt.selected = 'selected' list.appendChild(opt) - + ID = Math.floor(Math.random() * 987654321).toString(16) list.setAttribute('id', ID) $("#"+ID).chosen().change(() -> source = this.value - + if source == "" source = null globArgs.source = source @@ -512,9 +512,9 @@ ciexplorer = (json, state) -> updateWidgets('trends', null, { source: source }) updateWidgets('relationship', null, { source: source }) updateWidgets('punchcard', null, { source: source }) - + ) - + multiviewexplorer = (json, state) -> org = json.organisation @@ -547,7 +547,7 @@ multiviewexplorer = (json, state) -> if globArgs[tName] and globArgs[tName] == item.id opt.selected = 'selected' list.appendChild(opt) - + ID = Math.floor(Math.random() * 987654321).toString(16) list.setAttribute('id', ID) $("#"+ID).chosen().change(() -> @@ -568,7 +568,7 @@ multiviewexplorer = (json, state) -> updateWidgets('radar', null, x) updateWidgets('punchcard', null, x) ) - + subFilterGlob = null subFilter = () -> source = subFilterGlob @@ -601,7 +601,7 @@ subFilter = () -> updateWidgets('worldmap', null, x) updateWidgets('jsondump', null, x) updateWidgets('punchcard', null, x) - + $( "a" ).each( () -> url = $(this).attr('href') if url @@ -640,7 +640,7 @@ pathFilter = () -> updateWidgets('worldmap', null, x) updateWidgets('jsondump', null, x) updateWidgets('punchcard', null, x) - + $( "a" ).each( () -> url = $(this).attr('href') if url @@ -651,7 +651,7 @@ pathFilter = () -> else $(this).attr('href', "#{m[1]}#{m[2]}") ) - + viewexplorer = (json, state) -> org = json.organisation @@ -683,7 +683,7 @@ viewexplorer = (json, state) -> if globArgs[tName] and globArgs[tName] == item.id opt.selected = 'selected' list.appendChild(opt) - + ID = Math.floor(Math.random() * 987654321).toString(16) list.setAttribute('id', ID) $("#"+ID).chosen().change(() -> @@ -716,7 +716,7 @@ viewexplorer = (json, state) -> updateWidgets('worldmap', null, x) updateWidgets('jsondump', null, x) updateWidgets('punchcard', null, x) - + $( "a" ).each( () -> url = $(this).attr('href') if url @@ 
-727,9 +727,9 @@ viewexplorer = (json, state) -> else $(this).attr('href', "#{m[1]}#{m[2]}") ) - + ) - + # Quick filter state.widget.inject(new HTML('br')) i = new HTML('input', {id:'subfilter', size: 16, type: 'text', value: globArgs.subfilter, onChange: 'subFilterGlob = this.value;', placeholder: 'sub-filter'}) @@ -738,8 +738,8 @@ viewexplorer = (json, state) -> state.widget.inject(i) state.widget.inject(b) state.widget.inject(rb) - - + + if globArgs.subfilter and globArgs.subfilter.length > 0 source = globArgs.subfilter $( "a" ).each( () -> @@ -752,13 +752,13 @@ viewexplorer = (json, state) -> else $(this).attr('href', "#{m[1]}#{m[2]}") ) - + if globArgs.email div = new HTML('div', {}, "Currently filtering results based on " + globArgs.email + ". - ") div.inject(new HTML('a', { href: 'javascript:void(filterPerson(null));'}, "Reset filter")) state.widget.inject(div) - - + + widgetexplorer = (json, state) -> pwidgets = { @@ -817,7 +817,7 @@ widgetexplorer = (json, state) -> if globArgs[tName] and globArgs[tName] == key opt.selected = 'selected' list.appendChild(opt) - + list.addEventListener("change", () -> source = this.value if source == "" @@ -836,7 +836,5 @@ widgetexplorer = (json, state) -> updateWidgets('trends', null, x) updateWidgets('radar', null, x) updateWidgets('punchcard', null, x) - + , false) - - diff --git a/ui/js/coffee/kibble_account.coffee b/ui/js/coffee/kibble_account.coffee index fe8e773a..9158d753 100644 --- a/ui/js/coffee/kibble_account.coffee +++ b/ui/js/coffee/kibble_account.coffee @@ -41,32 +41,32 @@ accountCallback = (json, state) -> else t = new HTML('p', {}, "Please check your email account for a verification email.") obj.appendChild(t) - + kibbleSignup = (form) -> email = form.email.value displayName = form.displayname.value password = form.password.value password2 = form.password2.value - + # Passwords must match if password != password2 alert("Passwords must match!") return false - + # Username must be >= 2 chars if displayName.length < 2 alert("Please enter a proper display name!") return false - + # Email must be valid if not email.match(/([^@]+@[^.]+\.[^.])/) alert("Please enter a valid email address!") return false - + put('account', { email: email, password: password, displayname: displayName }, null, accountCallback) - - return false \ No newline at end of file + + return false diff --git a/ui/js/coffee/kibble_organisation.coffee b/ui/js/coffee/kibble_organisation.coffee index f6a4edc7..07b3490b 100644 --- a/ui/js/coffee/kibble_organisation.coffee +++ b/ui/js/coffee/kibble_organisation.coffee @@ -16,8 +16,8 @@ keyValueForm = (type, key, caption, placeholder) -> div = new HTML('div', { style: { width: "100%", margin: "10px", paddingBottom: "10px"}}) left = new HTML('div', { style: { float: "left", width: "300px", fontWeight: "bold"}}, caption) - right = new HTML('div', { style: { float: "left", width: "500px"}}) - + right = new HTML('div', { style: { float: "left", width: "500px"}}) + if type == 'text' inp = new HTML('input', {name: key, id: key, style: { marginBottom: "10px"}, class: "form-control", type: "text", placeholder: placeholder}) right.inject(inp) @@ -76,34 +76,34 @@ orglist = (json, state) -> new HTML('kbd', {}, ""+org.sourceCount.pretty()), " sources so far." 
]) - + odiv.inject(div) if not isDefault dbtn = new HTML('input', { style: { marginTop: "10px", width: "120px"},class: "btn btn-primary btn-block", type: "button", onclick: "setDefaultOrg('#{org.id}');", value: "Set as current"}) div.inject(dbtn) odiv.inject(new HTML('hr')) state.widget.inject(odiv, true) - + if userAccount.userlevel == "admin" fieldset = new HTML('fieldset', { style: { float: "left", margin: '30px'}}) legend = new HTML('legend', {}, "Create a new organisation:") fieldset.inject(legend) - + fieldset.inject(keyValueForm('text', 'orgname', 'Name of the organisation:', 'Foo, inc.')) fieldset.inject(keyValueForm('textarea', 'orgdesc', 'Description:', 'Foo, inc. is awesome and does stuff.')) fieldset.inject(keyValueForm('text', 'orgid', 'Optional org ID:', 'demo, myorg etc')) - + fieldset.inject(new HTML('p', {}, "You'll be able to add users and owners once the organisation has been created.")) - + btn = new HTML('input', { style: { width: "200px"},class: "btn btn-primary btn-block", type: "button", onclick: "makeOrg();", value: "Create organisation"}) fieldset.inject(btn) - + state.widget.inject(fieldset) - + inviteMember = (eml, admin) -> put('org/members', { email: eml, admin: admin}, null, memberInvited) - + removeMember = (eml, admin) -> xdelete('org/members', { email: eml, admin: admin}, null, memberInvited) @@ -115,7 +115,7 @@ memberInvited = (json, state) -> ) membershipList = (json, state) -> - + # Invite member form h = new HTML('h3', {}, "Invite a member to #{userAccount.defaultOrganisation}") state.widget.inject(h, true) @@ -124,13 +124,13 @@ membershipList = (json, state) -> state.widget.inject(inp) state.widget.inject(btn) state.widget.inject(new HTML('hr')) - - + + # Existing membership list h = new HTML('h3', {}, "Current membership of #{userAccount.defaultOrganisation}:") state.widget.inject(h) list = new HTML('table', { style: { margin: "20px", border: "1px solid #666"}}) - + for member in json.members tr = new HTML('tr', { style: { borderBottom: "1px solid #666"}}) eml = new HTML('td', { style: { padding: "5px"}}, member) @@ -140,17 +140,15 @@ membershipList = (json, state) -> if isAdmin alink = new HTML('a', { href: "javascript:void(inviteMember('#{member}', false));"}, "Remove as admin") admopt = new HTML('td', { style: { padding: "5px"}}, alink) - + # Remove member dlink = new HTML('a', { href: "javascript:void(removeMember('#{member}'));"}, "Remove from organisation") delopt = new HTML('td', { style: { padding: "5px"}}, dlink) - + tr.inject(eml) tr.inject(admin) tr.inject(admopt) tr.inject(delopt) list.inject(tr) - - state.widget.inject(list) - + state.widget.inject(list) diff --git a/ui/js/coffee/misc.coffee b/ui/js/coffee/misc.coffee index 30375772..49fdcfdd 100644 --- a/ui/js/coffee/misc.coffee +++ b/ui/js/coffee/misc.coffee @@ -35,7 +35,7 @@ fetch = (url, xstate, callback, nocreds) -> # GET URL xmlHttp.open("GET", "api/#{url}", true); xmlHttp.send(null); - + xmlHttp.onreadystatechange = (state) -> if xmlHttp.readyState == 4 and xmlHttp.status == 500 if snap @@ -70,7 +70,7 @@ put = (url, json, xstate, callback, nocreds = false) -> # GET URL xmlHttp.open("PUT", "api/#{url}", true); xmlHttp.send(JSON.stringify(json || {})); - + xmlHttp.onreadystatechange = (state) -> if xmlHttp.readyState == 4 and xmlHttp.status == 500 if snap @@ -105,7 +105,7 @@ patch = (url, json, xstate, callback, nocreds = false) -> # GET URL xmlHttp.open("PATCH", "api/#{url}", true); xmlHttp.send(JSON.stringify(json || {})); - + xmlHttp.onreadystatechange = (state) -> if 
xmlHttp.readyState == 4 and xmlHttp.status == 500 if snap @@ -139,7 +139,7 @@ xdelete = (url, json, xstate, callback, nocreds = false) -> # GET URL xmlHttp.open("DELETE", "api/#{url}", true); xmlHttp.send(JSON.stringify(json || {})); - + xmlHttp.onreadystatechange = (state) -> if xmlHttp.readyState == 4 and xmlHttp.status == 500 if snap @@ -180,12 +180,12 @@ post = (url, json, xstate, callback, snap) -> if val == 'false' json[key] = false fdata = JSON.stringify(json) - + # POST URL xmlHttp.open("POST", "api/#{url}", true); xmlHttp.setRequestHeader("Content-type", "application/json"); xmlHttp.send(fdata); - + xmlHttp.onreadystatechange = (state) -> if xmlHttp.readyState == 4 and xmlHttp.status == 500 if snap @@ -477,14 +477,14 @@ isArray = ( value ) -> typeof value.length is 'number' and typeof value.splice is 'function' and not ( value.propertyIsEnumerable 'length' ) - + ### isHash: function to detect if an object is a hash ### isHash = (value) -> value and typeof value is 'object' and not isArray(value) - + class HTML constructor: (type, params, children) -> @@ -493,7 +493,7 @@ class HTML @element = type.cloneNode() else @element = document.createElement(type) - + ### If params have been passed, set them ### if isHash(params) for key, val of params @@ -509,7 +509,7 @@ class HTML if not @element[key] throw "No such attribute, #{key}!" @element[key][subkey] = subval - + ### If any children have been passed, add them to the element ### if children ### If string, convert to textNode using txt() ### @@ -545,4 +545,4 @@ HTMLElement.prototype.inject = (child) -> if typeof child is 'string' child = txt(child) this.appendChild(child) - return child \ No newline at end of file + return child diff --git a/ui/js/coffee/pageloader.coffee b/ui/js/coffee/pageloader.coffee index b1d9f1ac..80af654e 100644 --- a/ui/js/coffee/pageloader.coffee +++ b/ui/js/coffee/pageloader.coffee @@ -26,7 +26,7 @@ setupPage = (json, state) -> div.style.textAlign = 'center' div.innerHTML = "
An error occurred: " + json.error + "
" return - + # View active? if userAccount.view and userAccount.view.length > 0 vrow = new Row() @@ -46,10 +46,10 @@ setupPage = (json, state) -> set(a, 'href', '?page=views') app(p, a) vrow.inject(p) - - document.title = json.title + " - Apache Kibble" + + document.title = json.title + " - Apache Kibble" # Go through each row - + for r in json.rows row = new Row() @@ -72,7 +72,7 @@ setupPage = (json, state) -> for k, v of child.wargs widget.wargs[k] = v if child.type not in ['views', 'sourcelist'] - widget.args.eargs.quick = 'true' + widget.args.eargs.quick = 'true' switch child.type when 'datepicker' then datepicker(widget) @@ -163,7 +163,7 @@ loadPageWidgets = (page, apiVersion) -> if m if globArgs.view $(this).attr('href', "#{m[1]}&view=#{globArgs.view}#{m[2]}") - + ) # Fetch account info fetch('session', null, renderAccountInfo) @@ -180,14 +180,14 @@ renderAccountInfo = (json, state) -> userAccount = json img = document.getElementById('user_image') img.setAttribute("src", "https://secure.gravatar.com/avatar/" + json.gravatar + ".png") - + name = document.getElementById('user_name') name.innerHTML = "" name.appendChild(document.createTextNode(json.displayName)) - + ulevel = get('user_level') ulevel.inject(if json.userlevel == 'admin' then 'Administrator' else if json.defaultOrganisation in json.ownerships then 'Organisation Owner' else 'User') - + nm = get('messages_number') nm.innerHTML = json.messages if json.messages > 0 @@ -206,13 +206,13 @@ renderAccountInfo = (json, state) -> msp = mk('span') app(msp, txt(email.senderName)) app(ma, msp) - + msp = mk('span') set(msp, 'class', 'message') app(msp, txt(email.subject)) app(ma, msp) app(mli, ma) app(nl, mli) - + # Fetch widget list fetch('widgets/' + pageID, { gargs: globArgs }, setupPage) diff --git a/ui/js/coffee/phonebook.coffee b/ui/js/coffee/phonebook.coffee index c43264d5..16922c5c 100644 --- a/ui/js/coffee/phonebook.coffee +++ b/ui/js/coffee/phonebook.coffee @@ -7,7 +7,7 @@ phonebook = (json, state) -> obj.innerText = "Found #{json.people.length} contributors.." 
obj.inject(new HTML('br')) state.widget.inject(obj, true) - + json.people.sort( (a,b) => if a.name < b.name return -1 @@ -15,7 +15,7 @@ phonebook = (json, state) -> return 1 return 0 ) - + for i, item of json.people if i > 250 break @@ -33,6 +33,3 @@ phonebook = (json, state) -> idiv.inject(left) idiv.inject(right) obj.inject(idiv) - - - \ No newline at end of file diff --git a/ui/js/coffee/sources.coffee b/ui/js/coffee/sources.coffee index c0012375..ebea14dd 100644 --- a/ui/js/coffee/sources.coffee +++ b/ui/js/coffee/sources.coffee @@ -70,9 +70,9 @@ deletesource = (hash) -> tr = get(hash) tr.parentNode.removeChild(tr) xdelete('sources', { id: hash }, null, null) - + sourceTypes = { - + } getSourceType = (main, t) -> if not sourceTypes[t] @@ -92,7 +92,7 @@ getSourceType = (main, t) -> app(tr, td) app(thead, tr) app(tbl, thead) - + tbody = new HTML('tbody') app(tbl, tbody) obj.inject(tbl) @@ -105,7 +105,7 @@ getSourceType = (main, t) -> return sourceTypes[t] sourcelist = (json, state) -> - + slist = mk('div') vlist = new HTML('div') if json.sources @@ -137,13 +137,13 @@ sourcelist = (json, state) -> d = mk('tr') set(d, 'id', source.sourceID) set(d, 'scope', 'row') - - + + t = mk('td') t.style.color = "#369" app(t, txt(source.sourceURL)) app(d, t) - + # Progress lastUpdate = 0 lastFailure = null @@ -157,9 +157,9 @@ sourcelist = (json, state) -> evolution: 'fa fa-signal' mail: 'fa fa-envelope' issues: 'fa fa-feed' - + t = new HTML('td', { style: { minWidth: "260px !important"}}) - + borked = false steps = ['sync', 'census', 'count', 'evolution'] if source.type in ['mail', 'ponymail', 'pipermail', 'hyperkitty'] @@ -202,7 +202,7 @@ sourcelist = (json, state) -> set(t, 'data-steps-failure', 'false') t.style.minWidth = "260px" app(d, t) - + lu = "Unknown" if lastUpdate > 0 lu = "" @@ -212,16 +212,16 @@ sourcelist = (json, state) -> if h > 0 lu = h + " hour" + (if h == 1 then '' else 's') + ", " lu += m + " minute" + (if m == 1 then '' else 's') + " ago." - + t = mk('td') t.style.textAlign = 'right' t.style.color = "#963" t.style.width = "200px !important" app(t, txt(lu)) app(d, t) - - - + + + status = mk('td') status.style.width = "600px !important" if lastFailure @@ -240,7 +240,7 @@ sourcelist = (json, state) -> else app(status, txt("No errors detected.")) app(d, status) - + act = mk('td') dbtn = mk('button') set(dbtn, 'class', 'btn btn-danger') @@ -248,10 +248,10 @@ sourcelist = (json, state) -> dbtn.style.padding = "2px" app(dbtn, txt("Delete")) app(act, dbtn) - + app(d, act) tbody.inject(d) - + for t, el of sourceTypes div = new HTML('div', {class: "sourceTypeIcon", onclick: "showType('#{t}');"}) el.btn = div @@ -262,12 +262,12 @@ sourcelist = (json, state) -> #app(slist, tbl) state.widget.inject(slist, true) state.widget.inject(vlist) - + retval = mk('div') set(retval, 'id', 'retval') state.widget.inject(retval) showType(true) # Show first available type - + showType = (t) -> for st, el of sourceTypes if st == t or t == true @@ -304,7 +304,7 @@ sourceadd = (json, state) -> div.inject(lbl) obj.inject(new HTML('p', {}, el.description or "")) obj.inject(keyValueForm('textarea', 'source', 'Source URL/ID:', "For example: " + el.example + ". 
You can add multiple sources, one per line.")) - + if el.optauth obj.inject((if el.authrequired then "Required" else "Optional") + " authentication options:") for abit in el.optauth @@ -314,12 +314,12 @@ sourceadd = (json, state) -> state.widget.inject(div, true) for k, v of aSourceTypes state.widget.inject(v) - + sourceAdded = (json, state) -> window.setTimeout(() -> location.reload() , 1000) - + addSources = (type, form) -> jsa = [] lineNo = 0 diff --git a/ui/js/coffee/widget.coffee b/ui/js/coffee/widget.coffee index 8e67d552..c578ee8e 100644 --- a/ui/js/coffee/widget.coffee +++ b/ui/js/coffee/widget.coffee @@ -29,10 +29,10 @@ toFullscreen = (id) -> FSA = get('FS_' + id) FSA.innerHTML = "Pop back" FSA.setAttribute("onclick", "toNormal('" + id + "');") - + CW = get('CW_' + id) CW.setAttribute("onclick", "toNormal('" + id + "');") - + w = findWidget(id) w.parent = obj.parentNode w.sibling = null @@ -44,7 +44,7 @@ toFullscreen = (id) -> break else if node == obj dobrk = true - + w.sibling = nxt ic = get('innercontents') app(ic, obj) @@ -64,14 +64,14 @@ toFullscreen = (id) -> toNormal = (id) -> obj = get(id) w = findWidget(id) - + FSA = get('FS_' + id) FSA.innerHTML = "Fullscreen" FSA.setAttribute("onclick", "toFullscreen('" + id + "');") - + CW = get('CW_' + id) CW.setAttribute("onclick", "findWidget('"+id+"').kill();") - + if w.sibling w.parent.insertBefore(obj, w.sibling) else @@ -98,7 +98,7 @@ updateWidgets = (type, target, eargs) -> console.log("pushed state " + wloc) window.onpopstate = (event) -> loadPageWidgets() - + for widget in widgetCache if type == widget.args.type widget.args.target = target and target or widget.args.target @@ -146,32 +146,32 @@ class pubWidget if clear @div.innerHTML = "" @div.appendChild(el) - + class Widget constructor: (@blocks, @args, pub) -> @id = Math.floor(Math.random()*1000000).toString(16) - + # Parent object div @div = document.createElement('div') @div.setAttribute("id", @id) @div.setAttribute("class", "x_panel snoot_widget") @div.style.float = 'left' @json = {} - - if (@blocks <= 2) + + if (@blocks <= 2) @div.setAttribute("class", "snoot_widget col-md-2 col-sm-4 col-xs-12") - else if (@blocks <= 3) + else if (@blocks <= 3) @div.setAttribute("class", "snoot_widget col-md-3 col-sm-6 col-xs-12") - else if (@blocks <= 4) + else if (@blocks <= 4) @div.setAttribute("class", "snoot_widget col-md-4 col-sm-8 col-xs-12") - else if (@blocks <= 6) + else if (@blocks <= 6) @div.setAttribute("class", "snoot_widget col-md-6 col-sm-12 col-xs-12") - else if (@blocks <= 9) + else if (@blocks <= 9) @div.setAttribute("class", "snoot_widget col-md-9 col-sm-12 col-xs-12") else @div.setAttribute("class", "snoot_widget col-md-12 col-sm-12 col-xs-12") - - + + if not pub # Title t = document.createElement('div') @@ -180,11 +180,11 @@ class Widget tt.style.fontSize = "17pt" tt.appendChild(document.createTextNode(@args.name)) t.appendChild(tt) - - # Menu + + # Menu ul = document.createElement('ul') ul.setAttribute("class", "nav navbar-right panel_toolbox") - + # Menu: collapse widget li = document.createElement('li') @collapse = document.createElement('a') @@ -194,7 +194,7 @@ class Widget @collapse.appendChild(i) li.appendChild(@collapse) ul.appendChild(li) - + @collapse.addEventListener "click", () -> id = this.parentNode.parentNode.parentNode.parentNode.getAttribute("id") panel = $('#'+id) @@ -208,10 +208,10 @@ class Widget else content.slideToggle(200) panel.css('height', 'auto') - + icon.toggleClass('fa-chevron-up fa-chevron-down'); - - + + # Menu: remove widget li = 
document.createElement('li') a = document.createElement('a') @@ -223,22 +223,22 @@ class Widget a.setAttribute("id", "CW_" + @id) li.appendChild(a) ul.appendChild(li) - + t.appendChild(ul) - + @div.appendChild(t) - + cldiv = document.createElement('div') cldiv.setAttribute("classs", "clearfix") @div.appendChild(cldiv) - + @cdiv = document.createElement('div') @cdiv.style.width = "100%" @cdiv.setAttribute("id", "contents_" + @id) @cdiv.setAttribute("class", "x_content") @div.appendChild(@cdiv) widgetCache.push(this) - + cog: (size = 100) -> idiv = document.createElement('div') idiv.setAttribute("class", "icon") @@ -249,20 +249,20 @@ class Widget idiv.appendChild(document.createTextNode('Loading, hang on tight..!')) @cdiv.innerHTML = "" @cdiv.appendChild(idiv) - + kill: () -> @div.parentNode.removeChild(@div) - + inject: (object, clear) -> if clear @cdiv.innerHTML = "" @cdiv.style.textAlign = 'left' @cdiv.appendChild(object) - + snap: (state) -> state.widget.cdiv.innerHTML = "
Oh snap, something went wrong!" state.widget.cdiv.style.textAlign = 'center' - + load: (callback) -> # Insert spinning cog this.cog() @@ -298,11 +298,10 @@ class Row @cdiv.setAttribute("id", "contents_" + @id) @div.appendChild(@cdiv) document.getElementById('innercontents').appendChild(@div) - + inject: (object, clear) -> @cdiv.innerHTML = "" if clear if object instanceof Widget @cdiv.appendChild(object.div) else @cdiv.appendChild(object) - diff --git a/ui/js/coffee/widget_admin.coffee b/ui/js/coffee/widget_admin.coffee index 2ffa0d00..72e79747 100644 --- a/ui/js/coffee/widget_admin.coffee +++ b/ui/js/coffee/widget_admin.coffee @@ -1,5 +1,5 @@ orgadmin = (json, state) -> - + if globArgs.org and json.admin[globArgs.org] pdiv = document.createElement('div') id = globArgs.org @@ -7,12 +7,12 @@ orgadmin = (json, state) -> h2 = mk('h2') app(h2, txt("Editing: " + title)) app(pdiv, h2) - + obj = mk('form') h4 = mk('h4') app(h4, txt("Invite a new user to this org:")) app(obj, h4) - + div = mk('div') app(div, txt("Username (email): ")) inp = mk('input') @@ -21,7 +21,7 @@ orgadmin = (json, state) -> inp.style.width = "200px" app(div, inp) app(obj, div) - + div = mk('div') app(div, txt("Make administrator: ")) inp = mk('input') @@ -30,21 +30,21 @@ orgadmin = (json, state) -> set(inp, 'value', 'true') app(div, inp) app(obj, div) - + btn = mk('input') set(btn, 'type', 'button') set(btn, 'onclick', 'addorguser(this.form)') set(btn, 'value', "Add user") app(obj, btn) - + app(pdiv, obj) - - + + obj = mk('form') h4 = mk('h4') app(h4, txt("Remove a user from the org:")) app(obj, h4) - + div = mk('div') app(div, txt("Username (email): ")) inp = mk('input') @@ -53,7 +53,7 @@ orgadmin = (json, state) -> inp.style.width = "200px" app(div, inp) app(obj, div) - + div = mk('div') app(div, txt("Just remove admin privs (if any): ")) inp = mk('input') @@ -62,20 +62,20 @@ orgadmin = (json, state) -> set(inp, 'value', 'true') app(div, inp) app(obj, div) - + btn = mk('input') set(btn, 'type', 'button') set(btn, 'onclick', 'remorguser(this.form)') set(btn, 'value', "Remove user") app(obj, btn) - + app(pdiv, obj) - + state.widget.inject(pdiv, true) else state.widget.inject(txt("You are not an admin of this organisation!")) - + addorguser = (form) -> js = { @@ -91,7 +91,7 @@ addorguser = (form) -> v = if form[i].checked then 'true' else 'false' if k in ['who', 'admin'] js[k] = v - + postJSON("admin-org", js, null, (a) -> alert("User added!") ) remorguser = (form) -> @@ -108,6 +108,5 @@ remorguser = (form) -> v = if form[i].checked then 'true' else 'false' if k in ['who', 'admin'] js[k] = v - + postJSON("admin-org", js, null, (a) -> alert("User removed!") ) - \ No newline at end of file diff --git a/ui/js/coffee/widget_affiliations.coffee b/ui/js/coffee/widget_affiliations.coffee index e0747e63..f91385f0 100644 --- a/ui/js/coffee/widget_affiliations.coffee +++ b/ui/js/coffee/widget_affiliations.coffee @@ -12,7 +12,7 @@ affiliation = (json, state) -> ngroups-- app(h3, txt("Found " + ngroups + " organisations/companies:")) app(obj, h3) - + btn = mk('input') set(btn, 'type', 'button') set(btn, 'class', 'btn btn-info') @@ -26,7 +26,7 @@ affiliation = (json, state) -> w.callback = affiliationWizard w.reload() ) - + p = mk('p') app(p, txt("You may use the ")) app(p, btn) @@ -132,7 +132,7 @@ affiliationWizard = (json, state) -> app(gdiv, sp) app(gdiv, mk('br')) app(obj,gdiv) - + btn = mk('input') set(btn, 'type', 'button') set(btn, 'class', 'btn btn-info') @@ -167,7 +167,7 @@ affiliate = (hash) -> if tag postJSON('affiliations', { 
tag: tags }, null, null) app(tr, txt("(Tagged as: " + tag + ") ")) - + altemail = (hash) -> tag = window.prompt("Please enter the alt email with which you wish to associate this source, or type nothing to clear alts.") if tag == "" @@ -178,4 +178,3 @@ altemail = (hash) -> if tag postJSON('affiliations', { altemail: tags }, null, null) app(tr, txt("(Affiliated as: " + tag + ") ")) - \ No newline at end of file diff --git a/ui/js/coffee/widget_bio.coffee b/ui/js/coffee/widget_bio.coffee index 4f4d3eb0..e8b55b47 100644 --- a/ui/js/coffee/widget_bio.coffee +++ b/ui/js/coffee/widget_bio.coffee @@ -13,7 +13,7 @@ bio = (json, state) -> firstemail = "Never" if json.bio.firstEmail firstemail = new Date(json.bio.firstEmail*1000).toDateString() - + bioOuter = new HTML('div', { class: 'media-event'} ) bioOuter.inject(new HTML('a', { class: 'pull-left bio-image'}, new HTML('img', { style: "width: 128px; height: 128px;", src: 'https://secure.gravatar.com/avatar/' + json.bio.gravatar + '.png?d=identicon&size=128'}) @@ -66,7 +66,7 @@ bio = (json, state) -> a = mk('a') set(a, 'href', 'javascript:void(affiliate("' + json.bio.id + '"));') app(a, txt("Set a tag")) - + egroups = [] if json.bio.alts and json.bio.alts.length for tag in json.bio.alts @@ -78,14 +78,14 @@ bio = (json, state) -> a2.style.marginLeft = "8px" set(a2, 'href', 'javascript:void(altemail("' + json.bio.id + '"));') app(a2, txt("Add alt email")) - + sp = mk('span') set(sp, 'id', 'tags_' + json.bio.id) app(obj, namecard) app(obj, a) app(obj, a2) app(obj, sp) - + else obj.innerHTML = "Person not found :/" state.widget.inject(obj, true) diff --git a/ui/js/coffee/widget_comstat.coffee b/ui/js/coffee/widget_comstat.coffee index a23cc42c..62381d80 100644 --- a/ui/js/coffee/widget_comstat.coffee +++ b/ui/js/coffee/widget_comstat.coffee @@ -4,9 +4,9 @@ comShow = (t) -> if (row.getAttribute("id")||"foo").match("comstat_#{t}_") row.style.display = "table-row" document.getElementById("comstat_#{t}_more").style.display = "none" - + comstat = (json, state) -> - + if json and json.stats row = new Row() p = new HTML('p', {}, @@ -21,12 +21,12 @@ comstat = (json, state) -> onchange: 'updateWidgets("comstat", null, { committersOnly: this.checked ? 
"true" : null });' }) lb = new HTML('label', { for: 'comonly' }, "Show only new committers, discard new authors.") - + row.inject(p) row.inject(chk) row.inject(lb) state.widget.inject(row.div, true) - + if json.stats.code.seen > 0 row = new Row() js = { alphaSort: true, counts: { @@ -44,7 +44,7 @@ comstat = (json, state) -> if json.stats.code.newcomers.length and json.stats.code.newcomers.length >= 0 nl = json.stats.code.newcomers.length stbl = new Widget(6, { name: "New code contributors (#{nl})" }) - + tbl = mk('table', {class: "table table-striped"}) tr = mk('tr', {}, [ mk('th', {}, "Avatar"), @@ -80,7 +80,7 @@ comstat = (json, state) -> app(tbl, tb) stbl.inject(tbl) row.inject(stbl) - + if json.stats.code.timeseries and json.stats.code.timeseries.length > 0 widget = new Widget(4, {name: "New code contributors over time:", representation: 'bars'}) widget.parent = state.widget @@ -89,9 +89,9 @@ comstat = (json, state) -> widget.json = js widget.callback = linechart linechart(js, { widget: widget}) - + state.widget.inject(row.div) - + if json.stats.issues.seen > 0 row = new Row() js = { alphaSort: true, counts: { @@ -109,7 +109,7 @@ comstat = (json, state) -> if json.stats.issues.newcomers.length and json.stats.issues.newcomers.length >= 0 nl = json.stats.issues.newcomers.length stbl = new Widget(6, { name: "New issue contributors (#{nl})" }) - + tbl = mk('table', {class: "table table-striped"}) tr = mk('tr', {}, [ mk('th', {}, "Avatar"), @@ -127,7 +127,7 @@ comstat = (json, state) -> key = json.bios[person].issue[1].key || url wh = new Date(json.bios[person].issue[0] * 1000.0).toDateString() person = json.bios[person].bio - + if i == 6 m = json.stats.issues.newcomers.length - i tr = mk('tr', {scope: 'row', id: 'comstat_issue_more'}, [ @@ -146,7 +146,7 @@ comstat = (json, state) -> app(tbl, tb) stbl.inject(tbl) row.inject(stbl) - + if json.stats.issues.timeseries and json.stats.issues.timeseries.length > 0 widget = new Widget(6, {name: "New issue contributors over time:", representation: 'bars'}) widget.parent = state.widget @@ -155,15 +155,15 @@ comstat = (json, state) -> widget.json = js widget.callback = linechart linechart(js, { widget: widget}) - - + + state.widget.inject(row.div) if json.stats.converts if json.stats.converts.issue_to_code.length and json.stats.converts.issue_to_code.length > 0 row = new Row() - + stbl = new Widget(6, { name: "Previous issue contributors who are now contributing code:" }) - + tbl = mk('table', {class: "table table-striped"}) tr = mk('tr', {}, [ mk('th', {}, "Avatar"), @@ -186,14 +186,14 @@ comstat = (json, state) -> app(tbl, tb) stbl.inject(tbl) row.inject(stbl) - + state.widget.inject(row.div) - + if json.stats.converts.email_to_code.length and json.stats.converts.email_to_code.length > 0 row = new Row() - + stbl = new Widget(6, { name: "Previous email authors who are now contributing code:" }) - + tbl = mk('table', {class: "table table-striped"}) tr = mk('tr', {}, [ mk('th', {}, "Avatar"), @@ -216,10 +216,10 @@ comstat = (json, state) -> app(tbl, tb) stbl.inject(tbl) row.inject(stbl) - + state.widget.inject(row.div) else notice = new HTML('h2', {}, "Community growth stats only works with user-defined views!") p = new HTML('p', {}, "To see community growth stats, please create a view of the code, email, bugs you wish to view stats for, or select an existng view in the list above") state.widget.inject(notice, true) - state.widget.inject(p) \ No newline at end of file + state.widget.inject(p) diff --git a/ui/js/coffee/widget_donut.coffee 
b/ui/js/coffee/widget_donut.coffee index 542e8405..a5359ce5 100644 --- a/ui/js/coffee/widget_donut.coffee +++ b/ui/js/coffee/widget_donut.coffee @@ -1,6 +1,6 @@ # Donut widget donut = (json, state) -> - + dt = [] dtl = [] l = 0 @@ -21,7 +21,7 @@ donut = (json, state) -> for item in dt dtl.push(item.name) theme.color = genColors(a+1, 0.55, 0.475, true) #quickColors(a) - + if (state.widget.args.representation == 'commentcount') code = 0 comment = 0 @@ -31,7 +31,7 @@ donut = (json, state) -> code += data.code comment += data.comment blank += data.blank||0 - + tot = code + comment dtl = ['Code', 'Comments'] dt = [ @@ -40,16 +40,16 @@ donut = (json, state) -> ] if blank > 0 dt.push({name: "Blanks", value: blank}) - + theme.color = genColors(3, 0.6, 0.5, true) - - + + if (state.widget.args.representation == 'sloccount' or (state.widget.args.representation != 'commentcount' and json.languages)) langs = json.languages for lang, data of langs tot += data.code top.push(lang) - + top.sort((a,b) => langs[b].code - langs[a].code) for lang in top l++ @@ -61,21 +61,19 @@ donut = (json, state) -> value: langs[lang].code }) dtl.push(lang) - - if (tot != ttot) + + if (tot != ttot) dtl.push('Other languages') dt.push( { name: 'Other languages', value: (tot-ttot) }) - + theme.color = genColors(17, 0.6, 0.5, true) - + data = {} for el in dt data[el.name] = el.value div = new HTML('div') state.widget.inject(div, true) chartBox = new Chart(div, 'donut', data, 25) - - \ No newline at end of file diff --git a/ui/js/coffee/widget_factors.coffee b/ui/js/coffee/widget_factors.coffee index ef642e66..789209ae 100644 --- a/ui/js/coffee/widget_factors.coffee +++ b/ui/js/coffee/widget_factors.coffee @@ -30,13 +30,13 @@ factors = (json, state) -> " #{pct}% change since last period" ]) h.inject(h2) - else + else h2 = new HTML('span', { style: { marginLeft: "8px", fontSize: "14px", color: 'green'}},[ new HTML('i', {class: "fa fa-chevron-circle-up"}), " +#{pct}% change since last period" ]) h.inject(h2) - + t = txt(factor.title) obj.inject(new HTML('div', {}, [h,t])) - state.widget.inject(obj, true) \ No newline at end of file + state.widget.inject(obj, true) diff --git a/ui/js/coffee/widget_jsondump.coffee b/ui/js/coffee/widget_jsondump.coffee index 8715b73e..677e9d48 100644 --- a/ui/js/coffee/widget_jsondump.coffee +++ b/ui/js/coffee/widget_jsondump.coffee @@ -2,4 +2,3 @@ jsondump = (json, state) -> pre = new HTML('pre', { style: { whiteSpace: 'pre-wrap'}}) pre.inject(JSON.stringify(json, null, 2)) state.widget.inject(pre, true) - diff --git a/ui/js/coffee/widget_map.coffee b/ui/js/coffee/widget_map.coffee index db11d680..82695c56 100644 --- a/ui/js/coffee/widget_map.coffee +++ b/ui/js/coffee/widget_map.coffee @@ -1,6 +1,6 @@ # World map widget worldmap = (json, state) -> - + dt = [] dtl = [] l = 0 @@ -15,7 +15,7 @@ worldmap = (json, state) -> ctotal += details.count if details.count > cmax cmax = details.count - + lmain = document.createElement('div') radius = ['30%', '50%'] if not state.widget.div.style.height @@ -29,7 +29,7 @@ worldmap = (json, state) -> lmain.style.width = "100%" state.widget.inject(lmain, true) echartMap = echarts.init(lmain, theme); - + echartMap.setOption({ title: { text: "Worldwide distribution by country" @@ -62,9 +62,9 @@ worldmap = (json, state) -> trigger: 'item', formatter: (params) -> return params.seriesName + '
' + params.name + ' : ' + (params.value||0).pretty(); - + }, - + series: [{ name: state.widget.name, type: 'map', @@ -79,4 +79,3 @@ worldmap = (json, state) -> }] }); theme.textStyle.fontSize = 12 - \ No newline at end of file diff --git a/ui/js/coffee/widget_messages.coffee b/ui/js/coffee/widget_messages.coffee index ec97b883..fa3b1dab 100644 --- a/ui/js/coffee/widget_messages.coffee +++ b/ui/js/coffee/widget_messages.coffee @@ -1,5 +1,5 @@ messages = (json, state) -> - + if isArray json obj = document.createElement('form') @@ -15,10 +15,10 @@ messages = (json, state) -> app(tr, td) app(thead, tr) app(tbl, thead) - + tbody = mk('tbody') app(tbl, tbody) - + for message in json tr = mk('tr') if message.read == false @@ -30,14 +30,14 @@ messages = (json, state) -> app(a, txt(new Date(message.epoch*1000).toString())) app(td, a) app(tr, td) - + td = mk('td') a = mk('a') set(a, 'href', '?page=messages&message=' + message.id) app(a, txt(message.senderName)) app(td, a) app(tr, td) - + td = mk('td') a = mk('a') set(a, 'href', '?page=messages&message=' + message.id) @@ -45,18 +45,18 @@ messages = (json, state) -> app(td, a) app(tr, td) app(tbody, tr) - + app(obj, tbl) - + items = recipient: 'Recipient ID' subject: "Message subject" body: "Message" - + h2 = mk('h2') app(h2, txt("Send a message:")) app(obj, h2) - + for item in ['recipient', 'subject', 'body'] div = mk('div') app(div, txt(items[item] + ": ")) @@ -71,13 +71,13 @@ messages = (json, state) -> set(inp, 'name', item) app(div, inp) app(obj, div) - + btn = mk('input') set(btn, 'type', 'button') set(btn, 'onclick', 'sendEmail(this.form)') set(btn, 'value', "Send message") app(obj, btn) - + #obj.innerHTML += JSON.stringify(json) state.widget.inject(obj, true) else @@ -87,42 +87,42 @@ messages = (json, state) -> app(obj, b) app(obj, txt(json.senderName + ' (' + json.sender + ')')) app(obj, mk('br')) - + b = mk('b') app(b, txt("Date: ")) app(obj, b) app(obj, txt(new Date(json.epoch*1000).toString())) app(obj, mk('br')) - + b = mk('b') app(b, txt("Subject: ")) app(obj, b) app(obj, txt(json.subject)) app(obj, mk('br')) app(obj, mk('br')) - + pre = mk('pre') app(pre, txt(json.body)) app(obj, pre) - + app(obj, mk('hr')) - + form = mk('form') items = recipient: 'Recipient ID' subject: "Message subject" body: "Message" - + h2 = mk('h2') app(h2, txt("Send a reply:")) app(form, h2) - + reply = { recipient: json.sender subject: 'RE: ' + json.subject body: '' } - + for item in ['recipient', 'subject', 'body'] div = mk('div') app(div, txt(items[item] + ": ")) @@ -138,13 +138,13 @@ messages = (json, state) -> set(inp, 'name', item) app(div, inp) app(form, div) - + btn = mk('input') set(btn, 'type', 'button') set(btn, 'onclick', 'sendEmail(this.form)') set(btn, 'value', "Send message") app(form, btn) - + app(obj, form) state.widget.inject(obj, true) @@ -158,4 +158,3 @@ sendEmail = (form) -> if k in ['recipient', 'subject', 'body'] js[k] = v postJSON("messages", js, null, (a) -> alert("Mail sent!") ) - \ No newline at end of file diff --git a/ui/js/coffee/widget_mvp.coffee b/ui/js/coffee/widget_mvp.coffee index 3cfd5785..a89e4864 100644 --- a/ui/js/coffee/widget_mvp.coffee +++ b/ui/js/coffee/widget_mvp.coffee @@ -12,15 +12,15 @@ mvp = (json, state) -> n = null globArgs.size = n updateWidgets('mvp', null, { size: n }) - + , false) state.widget.inject( new HTML('b', {}, "List size: "), true ) state.widget.inject(nlist) - - + + nlist = new HTML('select', { name: 'sort', id: 'sort'}) for i in ['commits', 'issues', 'emails'] el = new HTML('option', { value: i, 
text: i}) @@ -34,13 +34,13 @@ mvp = (json, state) -> n = null globArgs.sort = n updateWidgets('mvp', null, { sort: n }) - + , false) state.widget.inject( new HTML('b', {}, " Sort by: "), ) state.widget.inject(nlist) - + tbl = mk('table', {class: "table table-striped"}) tr = mk('tr', {}, [ mk('th', {}, "Rank"), @@ -67,5 +67,3 @@ mvp = (json, state) -> app(tbl, tb) state.widget.inject(tbl) #updateWidgets('trends', null, { email: email }) - - \ No newline at end of file diff --git a/ui/js/coffee/widget_paragraph.coffee b/ui/js/coffee/widget_paragraph.coffee index e32970ed..211e4b7d 100644 --- a/ui/js/coffee/widget_paragraph.coffee +++ b/ui/js/coffee/widget_paragraph.coffee @@ -12,6 +12,3 @@ paragraph = (json, state) -> app(lmain, para) else app(lmain, mk('p', {style:"font-size: 1.2rem;"}, json.text)) - - - \ No newline at end of file diff --git a/ui/js/coffee/widget_preferences.coffee b/ui/js/coffee/widget_preferences.coffee index d1bd94f9..08aebefb 100644 --- a/ui/js/coffee/widget_preferences.coffee +++ b/ui/js/coffee/widget_preferences.coffee @@ -9,7 +9,7 @@ preferences = (json, state) -> token: "API token" desc = tag: "If set, only sources with this tag will be shown in your views." - + for item in ['screenname', 'fullname', 'email', 'tag', 'token'] div = mk('div') app(div, txt(items[item] + ": ")) @@ -43,16 +43,16 @@ preferences = (json, state) -> app(list, opt) app(div,list) app(obj, div) - + btn = mk('input') set(btn, 'type', 'button') set(btn, 'onclick', 'saveprefs(this.form)') set(btn, 'value', "Save preferences") app(obj, btn) - + #obj.innerHTML += JSON.stringify(json) state.widget.inject(obj, true) - + # Org admin? if json.admin aobj = mk('div') @@ -81,4 +81,3 @@ saveprefs = (form) -> if k in ['screenname', 'fullname', 'email', 'tag', 'organisation'] js[k] = v postJSON("preferences", js, null, (a) -> alert("Preferences saved!") ) - \ No newline at end of file diff --git a/ui/js/coffee/widget_publisher.coffee b/ui/js/coffee/widget_publisher.coffee index 100269d4..c0b3fc1e 100644 --- a/ui/js/coffee/widget_publisher.coffee +++ b/ui/js/coffee/widget_publisher.coffee @@ -3,7 +3,7 @@ publisherWidget = null publisherPublic = (json, state) -> publisher(json, state, true) - + publisher = (json, state, nolink) -> div = document.createElement('div') state.public = true @@ -33,12 +33,12 @@ publisher = (json, state, nolink) -> if not location.href.match(/snoot\.io/) link = mk('a', { href: "https://www.snoot.io/", style: "font-size: 10px; margin-left: 60px; font-family: sans-serif;"}, "Data courtesy of Snoot.io") state.widget.inject(link) - + publishWidget = () -> postJSON("publish", { publish: JSON.parse(viewJS) }, null, postPublishLink) - + postPublishLink = (json, state) -> if json.id pdiv = get('publishercode') @@ -52,4 +52,3 @@ postPublishLink = (json, state) -> app(pdiv, txt("Script code for publishing:\n\n
\n\n#{added}")) else alert("Something broke :(") - \ No newline at end of file diff --git a/ui/js/coffee/widget_punchcard.coffee b/ui/js/coffee/widget_punchcard.coffee index 9071db98..b92e77af 100644 --- a/ui/js/coffee/widget_punchcard.coffee +++ b/ui/js/coffee/widget_punchcard.coffee @@ -17,10 +17,6 @@ punchcard = (json, state) -> div = document.createElement('div') if json.text div.inject(new HTML('p', {}, json.text)) - + state.widget.inject(div, true) pc = new Chart(div, 'punchcard', json, {punchcard: true}) - - - - \ No newline at end of file diff --git a/ui/js/coffee/widget_radar.coffee b/ui/js/coffee/widget_radar.coffee index 63aac0b8..14410be6 100644 --- a/ui/js/coffee/widget_radar.coffee +++ b/ui/js/coffee/widget_radar.coffee @@ -3,13 +3,13 @@ radarIndicators = [] radar = (json, state) -> - + lmain = new HTML('div') state.widget.inject(lmain, true) - + radarChart = new Chart(lmain, 'radar', json.radar) - - + + # Harmonizer id = Math.floor(Math.random() * 987654321).toString(16) chk = document.createElement('input') @@ -23,7 +23,7 @@ radar = (json, state) -> if this.checked harmonize = 'true' globArgs['harmonize'] = 'true' - + updateWidgets('radar', null, { harmonize: harmonize }) ) state.widget.inject(mk('br')) @@ -35,7 +35,7 @@ radar = (json, state) -> label.style.paddingLeft = '5px' label.appendChild(document.createTextNode('Harmonize edges')) state.widget.inject(label) - + # Relativizer id = Math.floor(Math.random() * 987654321).toString(16) chk = document.createElement('input') @@ -49,7 +49,7 @@ radar = (json, state) -> if this.checked relativize = 'true' globArgs['relativize'] = 'true' - + updateWidgets('radar', null, { relativize: relativize }) ) state.widget.inject(mk('br')) @@ -60,4 +60,4 @@ radar = (json, state) -> chk.setAttribute("title", "Check this box to force all areas to be relative to their own projects (and not the compared projects). 
This may help to display focus areas.") label.style.paddingLeft = '5px' label.appendChild(document.createTextNode('Make all projects relative to themselves')) - state.widget.inject(label) \ No newline at end of file + state.widget.inject(label) diff --git a/ui/js/coffee/widget_relation.coffee b/ui/js/coffee/widget_relation.coffee index 474c93d5..8bddba7d 100644 --- a/ui/js/coffee/widget_relation.coffee +++ b/ui/js/coffee/widget_relation.coffee @@ -2,23 +2,23 @@ relationship = (json, state) -> div = document.createElement('div') state.widget.inject(div, true) chart = new Chart(div, 'relationship', json, {}) - - + + id = Math.floor(Math.random() * 987654321).toString(16) invchk = new HTML('input', { class: "uniform", style: { marginRight: "10px"}, id: "author_#{id}", type: 'checkbox', checked: globArgs.author, name: 'author', value: 'true' }) - + invchk.addEventListener("change", () -> author = null if this.checked author = 'true' globArgs['author'] = 'true' - + updateWidgets('relationship', null, { author: author }) ) invlbl = new HTML('label', { for: "author_#{id}"}, "Inverse map (sender <-> recipient)") state.widget.inject(invchk) state.widget.inject(invlbl) - + state.widget.inject(new HTML('br')) state.widget.inject(new HTML('span', {}, "Minimum signal strength: ")) sigsel = new HTML('select', {id: "signal_#{id}"}) @@ -30,7 +30,7 @@ relationship = (json, state) -> if this.value links = this.value globArgs['links'] = links - + updateWidgets('relationship', null, { links: links }) ) - state.widget.inject(sigsel) \ No newline at end of file + state.widget.inject(sigsel) diff --git a/ui/js/coffee/widget_report.coffee b/ui/js/coffee/widget_report.coffee index d056b6eb..56a7f9c4 100644 --- a/ui/js/coffee/widget_report.coffee +++ b/ui/js/coffee/widget_report.coffee @@ -12,8 +12,8 @@ rcollate = (list) -> report = (json, state) -> div = document.createElement('div') state.widget.inject(div, true) - - + + # Get + write the age of the project, if possible if json.projectAge == 0 app(div, mk('h3', {}, "We were unable to determine the age of this project, sorry!")) @@ -22,18 +22,18 @@ report = (json, state) -> ageInYears = parseInt(json.projectAge / (86400*365.25)) age = mk('h3', {}, "Estimated age of project: #{ageInMonths} months (#{ageInYears} years)") app(div, age) - - + + # Commit rate trends if ageInYears >= 1 title = mk('h2', {}, "Long range trends:") - + app(div, title) - + # Commits stitle = mk('h3', {}, "Commits:") carr = [] - + # 5 year commit trend if ageInYears >= 5 pct = json.commits['5'].angle @@ -51,7 +51,7 @@ report = (json, state) -> if pct > 50 rtext = "a strong increase in commits in the long term (5+ years)" carr.push(rtext) - + # 2 year commit trend if ageInYears >= 2 pct = json.commits['2'].angle @@ -69,7 +69,7 @@ report = (json, state) -> if pct > 50 rtext = "a strong increase in commits in the medium term (2 years)" carr.push(rtext) - + # 1 year commit trend if ageInYears >= 1 pct = json.commits['1'].angle @@ -87,15 +87,15 @@ report = (json, state) -> if pct > 50 rtext = "a strong increase in commits in the short term (past year)" carr.push(rtext) - + p = mk('p', {}, "This project has experienced " + rcollate(carr) + ".") app(div, stitle) app(div, p) - + # Contributors stitle = mk('h3', {}, "Contributors:") carr = [] - + # 5 year commit trend if ageInYears >= 5 pct = json.authors['5'].authors.angle @@ -113,7 +113,7 @@ report = (json, state) -> if pct > 50 rtext = "a strong increase in contributors in the long term (5+ years)" carr.push(rtext) - + 
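
The trend phrases pushed onto carr are later collated into one sentence via rcollate ("This project has experienced " + rcollate(carr) + "."). Its body is cut off by the hunk header at the top of this file; judging only from those call sites, a plausible shape would be:

    # Hypothetical sketch of rcollate, inferred from its call sites;
    # the real implementation sits above this hunk and may differ.
    rcollate = (list) ->
      return "" if list.length == 0
      return list[0] if list.length == 1
      list[0...-1].join(", ") + " and " + list[list.length - 1]

# 2 year commit trend if 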
ageInYears >= 2 pct = json.authors['2'].authors.angle @@ -131,7 +131,7 @@ report = (json, state) -> if pct > 50 rtext = "a strong increase in contributors in the medium term (2 years)" carr.push(rtext) - + # 1 year commit trend if ageInYears >= 1 pct = json.authors['1'].authors.angle @@ -149,10 +149,9 @@ report = (json, state) -> if pct > 50 rtext = "a strong increase in contributors in the short term (past year)" carr.push(rtext) - + active = parseInt(json.authors['1'].authors.average) carr.push("currently has #{active} active contributors") p = mk('p', {}, "The project has had " + rcollate(carr) + ".") app(div, stitle) app(div, p) - \ No newline at end of file diff --git a/ui/js/coffee/widget_top5.coffee b/ui/js/coffee/widget_top5.coffee index 7cddea0a..e3bdb372 100644 --- a/ui/js/coffee/widget_top5.coffee +++ b/ui/js/coffee/widget_top5.coffee @@ -70,20 +70,20 @@ top5 = (json, state) -> pos = 5 while pos < json.topN.items.length nid = id + "_show_" + pos - + obj.inject(new HTML('a', { style: { cursor: 'pointer'}, onclick: "this.style.display = 'none'; get('#{nid}').style.display = 'block';"}, "Show more...")) obj = new HTML('div', { id: nid, style: { display: 'none'}}) make5(obj, json, pos) state.widget.inject(obj) pos += 5 - - + + showMore = (id) -> obj = document.getElementById(id) if obj obj.style.display = "block" - + filterPerson = (email) -> if email == "" @@ -96,4 +96,3 @@ filterPerson = (email) -> updateWidgets('relationship', null, { email: email }) updateWidgets('viewpicker', null, { email: email }) globArgs.email = email - \ No newline at end of file diff --git a/ui/js/coffee/widget_treemap.coffee b/ui/js/coffee/widget_treemap.coffee index 00a3074b..5b9cb96b 100644 --- a/ui/js/coffee/widget_treemap.coffee +++ b/ui/js/coffee/widget_treemap.coffee @@ -3,7 +3,7 @@ treemap = (json, state) -> cats = new Array() dates = new Array() catdata = {} - + filled = { areaStyle: {type: 'default' } } if json.widgetType if json.widgetType.chartType @@ -16,25 +16,25 @@ treemap = (json, state) -> #type = state.widget.args.representation if not json.widget.title or json.widget.title.length == 0 json.widget.title = 'Languages' - + if not state.widget.div.style.height div.style.minHeight = "900px" else div.style.minHeight = "100%" if state.widget.fullscreen div.style.minHeight = (window.innerHeight - 100) + "px" - + state.widget.inject(div, true) - - - + + + range = "" rect = div.getBoundingClientRect() theme.color = genColors(json.treemap.length+1, 0.6, 0.5, true) colors = genColors(json.treemap.length+1, 0.6, 0.5, true) theme.textStyle.fontSize = Math.max(12, window.innerHeight/100) echartLine = echarts.init(div, theme); - + ld = [] for lang, i in json.treemap ld.push(lang) @@ -45,7 +45,7 @@ treemap = (json, state) -> color: colors[i] } } - + option = { title: { @@ -58,7 +58,7 @@ treemap = (json, state) -> #selectedMode: 'single', data: ld }], - + tooltip: { show: true, feature: { @@ -71,18 +71,18 @@ treemap = (json, state) -> value = info.value; treePathInfo = info.treePathInfo; treePath = []; - + for i in [1...treePathInfo.length] treePath.push(treePathInfo[i].name) - - + + return [ '
' + treePath.join('/') + '
', 'Lines of Code: ' + value.pretty(), ].join(''); - + }, - + series: [ { name:json.widget.title, @@ -122,4 +122,3 @@ treemap = (json, state) -> ] } echartLine.setOption(option = option); - \ No newline at end of file diff --git a/ui/js/coffee/widget_trend.coffee b/ui/js/coffee/widget_trend.coffee index e8d661cb..090d7b3d 100644 --- a/ui/js/coffee/widget_trend.coffee +++ b/ui/js/coffee/widget_trend.coffee @@ -19,23 +19,23 @@ trendBox = (icon, count, title, desc) -> i.setAttribute("class", "fa " + (icons[icon] || 'fa-comments-o')) idiv.appendChild(i) cdiv.appendChild(idiv) - + # Count codiv = document.createElement('div') codiv.setAttribute("class", "count") codiv.appendChild(document.createTextNode(count)) cdiv.appendChild(codiv) - + # Title h3 = document.createElement('h4') h3.appendChild(document.createTextNode(title)) cdiv.appendChild(h3) - + # Description p = document.createElement('p') p.appendChild(document.createTextNode(desc)) cdiv.appendChild(p) - + div.appendChild(cdiv) return div @@ -57,6 +57,3 @@ trend = (json, state) -> tb = trendBox(icon, data.after.pretty(), data.title, linediff) state.widget.inject(tb, wipe) wipe = false - - - \ No newline at end of file diff --git a/ui/js/coffee/widget_views.coffee b/ui/js/coffee/widget_views.coffee index 8893a37d..28744955 100644 --- a/ui/js/coffee/widget_views.coffee +++ b/ui/js/coffee/widget_views.coffee @@ -65,9 +65,9 @@ filterView = (val) -> me.style.background = "#4B8" me.style.color = "#FFF" me.style.display = 'block' - + manageviews = (json, state) -> - + obj = mk('div') p = mk('p') app(p, txt("Views allow you to quickly set up a group of sources to view as a sub-organisation, much like tags, but faster.")) @@ -75,15 +75,15 @@ manageviews = (json, state) -> h3 = mk('h3') noviews = json.views.length || 0 app(h3, txt("You currently have " + noviews + " view" + (if noviews == 1 then '' else 's') + " in your database ")) - + btn = mk('input') set(btn, 'type', 'button') set(btn, 'class', 'btn btn-success') set(btn, 'value', 'Create a new view') set(btn, 'onclick', 'get("newdiv").style.display = "block"; this.style.display = "none";') - app(h3, btn) + app(h3, btn) app(obj, h3) - + newdiv = mk('div') set(newdiv, 'id', 'newdiv') newdiv.style.display = "none" @@ -94,7 +94,7 @@ manageviews = (json, state) -> app(newdiv, txt("Name your new view: ")) app(newdiv, inp) app(newdiv, mk('br')) - + if userAccount.userlevel == 'admin' or userAccount.defaultOrganisation in userAccount.ownerships inp = mk('input') set(inp, 'type', 'checkbox') @@ -102,7 +102,7 @@ manageviews = (json, state) -> app(newdiv, txt("Make view public (global): ")) app(newdiv, inp) app(newdiv, mk('br')) - + inp = mk('input') set(inp, 'type', 'text') set(inp, 'id', 'viewfilter') @@ -111,7 +111,7 @@ manageviews = (json, state) -> app(newdiv, inp) app(newdiv, mk('i', {}, "You can use the filter-select to quickly mark sources based on a regex. 
Type in 'foo' to select all sources matching 'foo' etc.")) app(newdiv, mk('br')) - + app(newdiv, txt("Select the sources you wish to add to this view below:")) app(newdiv, mk('br')) btn = mk('input') @@ -150,7 +150,7 @@ manageviews = (json, state) -> set(btn, 'value', 'Save view') set(btn, 'onclick', 'saveview();') app(newdiv, btn) - + app(obj, newdiv) for view in json.views popdiv = mk('div') @@ -163,8 +163,8 @@ manageviews = (json, state) -> popdiv.style.background = "#323234" h4.style.display = "inline-block" app(popdiv, h4) - - + + btn = mk('input') set(btn, 'type', 'button') set(btn, 'class', 'btn btn-warning') @@ -182,7 +182,7 @@ manageviews = (json, state) -> btn.style.marginLeft = "20px" btn.style.padding = "2px" app(popdiv, btn) - + btn = mk('input') set(btn, 'type', 'button') set(btn, 'class', 'btn btn-success') @@ -191,7 +191,7 @@ manageviews = (json, state) -> btn.style.marginLeft = "20px" btn.style.padding = "2px" app(popdiv, btn) - + h4.style.color = "#FFA" h4.style.cursor = 'pointer' set(h4, 'onclick', "get('" + view.id + "').style.display = (get('" + view.id + "').style.display == 'block') ? 'none' : 'block'") @@ -245,6 +245,4 @@ manageviews = (json, state) -> app(newdiv, btn) app(obj, popdiv) app(obj, newdiv) - state.widget.inject(obj, true) - - \ No newline at end of file + state.widget.inject(obj, true) diff --git a/ui/js/core.js b/ui/js/core.js index f0918731..ff814f00 100644 --- a/ui/js/core.js +++ b/ui/js/core.js @@ -202,4 +202,4 @@ Metis.panelBodyCollapse(); Metis.boxHiding(); }); -})(jQuery); \ No newline at end of file +})(jQuery); diff --git a/ui/js/d3.min.js b/ui/js/d3.min.js index 2135fcb4..57d22b5b 100644 --- a/ui/js/d3.min.js +++ b/ui/js/d3.min.js @@ -1,2 +1,2 @@ // https://d3js.org Version 4.10.2. Copyright 2017 Mike Bostock. 
- [... minified d3 4.10.2 body (the file's single code line) elided; judging by the rest of this patch, the hunk removes and re-adds that line unchanged, apparently only to fix end-of-file whitespace ...]
r?(t.w=y[r[0].toLowerCase()],e+r[0].length):-1},A:function(t,n,e){var r=d.exec(n.slice(e));return r?(t.w=v[r[0].toLowerCase()],e+r[0].length):-1},b:function(t,n,e){var r=x.exec(n.slice(e));return r?(t.m=b[r[0].toLowerCase()],e+r[0].length):-1},B:function(t,n,e){var r=g.exec(n.slice(e));return r?(t.m=m[r[0].toLowerCase()],e+r[0].length):-1},c:function(t,n,e){return r(t,i,n,e)},d:Yu,e:Yu,H:ju,I:ju,j:Bu,L:$u,m:Iu,M:Hu,p:function(t,n,e){var r=h.exec(n.slice(e));return r?(t.p=p[r[0].toLowerCase()],e+r[0].length):-1},S:Xu,U:qu,w:Lu,W:Uu,x:function(t,n,e){return r(t,o,n,e)},X:function(t,n,e){return r(t,u,n,e)},y:Ou,Y:Du,Z:Fu,"%":Vu};return w.x=n(o,w),w.X=n(u,w),w.c=n(i,w),M.x=n(o,M),M.X=n(u,M),M.c=n(i,M),{format:function(t){var e=n(t+="",w);return e.toString=function(){return t},e},parse:function(t){var n=e(t+="",Nu);return n.toString=function(){return t},n},utcFormat:function(t){var e=n(t+="",M);return e.toString=function(){return t},e},utcParse:function(t){var n=e(t,Su);return n.toString=function(){return t},n}}}function Cu(t,n,e){var r=t<0?"-":"",i=(r?-t:t)+"",o=i.length;return r+(o68?1900:2e3),e+r[0].length):-1}function Fu(t,n,e){var r=/^(Z)|([+-]\d\d)(?:\:?(\d\d))?/.exec(n.slice(e,e+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),e+r[0].length):-1}function Iu(t,n,e){var r=Oy.exec(n.slice(e,e+2));return r?(t.m=r[0]-1,e+r[0].length):-1}function Yu(t,n,e){var r=Oy.exec(n.slice(e,e+2));return r?(t.d=+r[0],e+r[0].length):-1}function Bu(t,n,e){var r=Oy.exec(n.slice(e,e+3));return r?(t.m=0,t.d=+r[0],e+r[0].length):-1}function ju(t,n,e){var r=Oy.exec(n.slice(e,e+2));return r?(t.H=+r[0],e+r[0].length):-1}function Hu(t,n,e){var r=Oy.exec(n.slice(e,e+2));return r?(t.M=+r[0],e+r[0].length):-1}function Xu(t,n,e){var r=Oy.exec(n.slice(e,e+2));return r?(t.S=+r[0],e+r[0].length):-1}function $u(t,n,e){var r=Oy.exec(n.slice(e,e+3));return r?(t.L=+r[0],e+r[0].length):-1}function Vu(t,n,e){var r=Fy.exec(n.slice(e,e+1));return r?e+r[0].length:-1}function Wu(t,n){return Cu(t.getDate(),n,2)}function Zu(t,n){return Cu(t.getHours(),n,2)}function Gu(t,n){return Cu(t.getHours()%12||12,n,2)}function Ju(t,n){return Cu(1+$_.count(fy(t),t),n,3)}function Qu(t,n){return Cu(t.getMilliseconds(),n,3)}function Ku(t,n){return Cu(t.getMonth()+1,n,2)}function ta(t,n){return Cu(t.getMinutes(),n,2)}function na(t,n){return Cu(t.getSeconds(),n,2)}function ea(t,n){return Cu(W_.count(fy(t),t),n,2)}function ra(t){return t.getDay()}function ia(t,n){return Cu(Z_.count(fy(t),t),n,2)}function oa(t,n){return Cu(t.getFullYear()%100,n,2)}function ua(t,n){return Cu(t.getFullYear()%1e4,n,4)}function aa(t){var n=t.getTimezoneOffset();return(n>0?"-":(n*=-1,"+"))+Cu(n/60|0,"0",2)+Cu(n%60,"0",2)}function ca(t,n){return Cu(t.getUTCDate(),n,2)}function sa(t,n){return Cu(t.getUTCHours(),n,2)}function fa(t,n){return Cu(t.getUTCHours()%12||12,n,2)}function la(t,n){return Cu(1+_y.count(Ly(t),t),n,3)}function ha(t,n){return Cu(t.getUTCMilliseconds(),n,3)}function pa(t,n){return Cu(t.getUTCMonth()+1,n,2)}function da(t,n){return Cu(t.getUTCMinutes(),n,2)}function va(t,n){return Cu(t.getUTCSeconds(),n,2)}function _a(t,n){return Cu(gy.count(Ly(t),t),n,2)}function ya(t){return t.getUTCDay()}function ga(t,n){return Cu(my.count(Ly(t),t),n,2)}function ma(t,n){return Cu(t.getUTCFullYear()%100,n,2)}function xa(t,n){return Cu(t.getUTCFullYear()%1e4,n,4)}function ba(){return"+0000"}function wa(){return"%"}function Ma(n){return qy=Au(n),t.timeFormat=qy.format,t.timeParse=qy.parse,t.utcFormat=qy.utcFormat,t.utcParse=qy.utcParse,qy}function Ta(t){return new Date(t)}function 
ka(t){return t instanceof Date?+t:+new Date(+t)}function Na(t,n,e,r,o,u,a,c,s){function f(i){return(a(i)1?0:t<-1?gg:Math.acos(t)}function Ca(t){return t>=1?mg:t<=-1?-mg:Math.asin(t)}function za(t){return t.innerRadius}function Pa(t){return t.outerRadius}function Ra(t){return t.startAngle}function La(t){return t.endAngle}function qa(t){return t&&t.padAngle}function Ua(t,n,e,r,i,o,u,a){var c=e-t,s=r-n,f=u-i,l=a-o,h=(f*(n-o)-l*(t-i))/(l*c-f*s);return[t+h*c,n+h*s]}function Da(t,n,e,r,i,o,u){var a=t-e,c=n-r,s=(u?o:-o)/_g(a*a+c*c),f=s*c,l=-s*a,h=t+f,p=n+l,d=e+f,v=r+l,_=(h+d)/2,y=(p+v)/2,g=d-h,m=v-p,x=g*g+m*m,b=i-o,w=h*v-d*p,M=(m<0?-1:1)*_g(pg(0,b*b*x-w*w)),T=(w*m-g*M)/x,k=(-w*g-m*M)/x,N=(w*m+g*M)/x,S=(-w*g+m*M)/x,E=T-_,A=k-y,C=N-_,z=S-y;return E*E+A*A>C*C+z*z&&(T=N,k=S),{cx:T,cy:k,x01:-f,y01:-l,x11:T*(i/b-1),y11:k*(i/b-1)}}function Oa(t){this._context=t}function Fa(t){return t[0]}function Ia(t){return t[1]}function Ya(t){this._curve=t}function Ba(t){function n(n){return new Ya(t(n))}return n._curve=t,n}function ja(t){var n=t.curve;return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t.curve=function(t){return arguments.length?n(Ba(t)):n()._curve},t}function Ha(t){return t.source}function Xa(t){return t.target}function $a(t){function n(){var n,a=Cg.call(arguments),c=e.apply(this,a),s=r.apply(this,a);if(u||(u=n=ve()),t(u,+i.apply(this,(a[0]=c,a)),+o.apply(this,a),+i.apply(this,(a[0]=s,a)),+o.apply(this,a)),n)return u=null,n+""||null}var e=Ha,r=Xa,i=Fa,o=Ia,u=null;return n.source=function(t){return arguments.length?(e=t,n):e},n.target=function(t){return arguments.length?(r=t,n):r},n.x=function(t){return arguments.length?(i="function"==typeof t?t:sg(+t),n):i},n.y=function(t){return arguments.length?(o="function"==typeof t?t:sg(+t),n):o},n.context=function(t){return arguments.length?(u=null==t?null:t,n):u},n}function Va(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n=(n+r)/2,e,n,i,r,i)}function Wa(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n,e=(e+i)/2,r,e,r,i)}function Za(t,n,e,r,i){var o=Ag(n,e),u=Ag(n,e=(e+i)/2),a=Ag(r,e),c=Ag(r,i);t.moveTo(o[0],o[1]),t.bezierCurveTo(u[0],u[1],a[0],a[1],c[0],c[1])}function Ga(t,n,e){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+n)/6,(t._y0+4*t._y1+e)/6)}function Ja(t){this._context=t}function Qa(t){this._context=t}function Ka(t){this._context=t}function tc(t,n){this._basis=new Ja(t),this._beta=n}function nc(t,n,e){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-n),t._y2+t._k*(t._y1-e),t._x2,t._y2)}function ec(t,n){this._context=t,this._k=(1-n)/6}function rc(t,n){this._context=t,this._k=(1-n)/6}function ic(t,n){this._context=t,this._k=(1-n)/6}function oc(t,n,e){var r=t._x1,i=t._y1,o=t._x2,u=t._y2;if(t._l01_a>yg){var a=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,c=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*a-t._x0*t._l12_2a+t._x2*t._l01_2a)/c,i=(i*a-t._y0*t._l12_2a+t._y2*t._l01_2a)/c}if(t._l23_a>yg){var s=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,f=3*t._l23_a*(t._l23_a+t._l12_a);o=(o*s+t._x1*t._l23_2a-n*t._l12_2a)/f,u=(u*s+t._y1*t._l23_2a-e*t._l12_2a)/f}t._context.bezierCurveTo(r,i,o,u,t._x2,t._y2)}function uc(t,n){this._context=t,this._alpha=n}function ac(t,n){this._context=t,this._alpha=n}function cc(t,n){this._context=t,this._alpha=n}function sc(t){this._context=t}function fc(t){return t<0?-1:1}function lc(t,n,e){var 
r=t._x1-t._x0,i=n-t._x1,o=(t._y1-t._y0)/(r||i<0&&-0),u=(e-t._y1)/(i||r<0&&-0),a=(o*i+u*r)/(r+i);return(fc(o)+fc(u))*Math.min(Math.abs(o),Math.abs(u),.5*Math.abs(a))||0}function hc(t,n){var e=t._x1-t._x0;return e?(3*(t._y1-t._y0)/e-n)/2:n}function pc(t,n,e){var r=t._x0,i=t._y0,o=t._x1,u=t._y1,a=(o-r)/3;t._context.bezierCurveTo(r+a,i+a*n,o-a,u-a*e,o,u)}function dc(t){this._context=t}function vc(t){this._context=new _c(t)}function _c(t){this._context=t}function yc(t){this._context=t}function gc(t){var n,e,r=t.length-1,i=new Array(r),o=new Array(r),u=new Array(r);for(i[0]=0,o[0]=2,u[0]=t[0]+2*t[1],n=1;n=0;--n)i[n]=(u[n]-i[n+1])/o[n];for(o[r-1]=(t[r]+i[r-1])/2,n=0;n0)){if(o/=h,h<0){if(o0){if(o>l)return;o>f&&(f=o)}if(o=r-c,h||!(o<0)){if(o/=h,h<0){if(o>l)return;o>f&&(f=o)}else if(h>0){if(o0)){if(o/=p,p<0){if(o0){if(o>l)return;o>f&&(f=o)}if(o=i-s,p||!(o<0)){if(o/=p,p<0){if(o>l)return;o>f&&(f=o)}else if(p>0){if(o0||l<1)||(f>0&&(t[0]=[c+f*h,s+f*p]),l<1&&(t[1]=[c+l*h,s+l*p]),!0)}}}}}function Rc(t,n,e,r,i){var o=t[1];if(o)return!0;var u,a,c=t[0],s=t.left,f=t.right,l=s[0],h=s[1],p=f[0],d=f[1],v=(l+p)/2,_=(h+d)/2;if(d===h){if(v=r)return;if(l>p){if(c){if(c[1]>=i)return}else c=[v,e];o=[v,i]}else{if(c){if(c[1]1)if(l>p){if(c){if(c[1]>=i)return}else c=[(e-a)/u,e];o=[(i-a)/u,i]}else{if(c){if(c[1]=r)return}else c=[n,u*n+a];o=[r,u*r+a]}else{if(c){if(c[0]dm||Math.abs(i[0][1]-i[1][1])>dm)||delete lm[o]}function qc(t){return sm[t.index]={site:t,halfedges:[]}}function Uc(t,n){var e=t.site,r=n.left,i=n.right;return e===i&&(i=r,r=e),i?Math.atan2(i[1]-r[1],i[0]-r[0]):(e===r?(r=n[1],i=n[0]):(r=n[0],i=n[1]),Math.atan2(r[0]-i[0],i[1]-r[1]))}function Dc(t,n){return n[+(n.left!==t.site)]}function Oc(t,n){return n[+(n.left===t.site)]}function Fc(){for(var t,n,e,r,i=0,o=sm.length;idm||Math.abs(v-h)>dm)&&(c.splice(a,0,lm.push(Cc(u,p,Math.abs(d-t)dm?[t,Math.abs(l-t)dm?[Math.abs(h-r)dm?[e,Math.abs(l-e)dm?[Math.abs(h-n)=-vm)){var p=c*c+s*s,d=f*f+l*l,v=(l*p-s*d)/h,_=(c*d-f*p)/h,y=hm.pop()||new Yc;y.arc=t,y.site=i,y.x=v+u,y.y=(y.cy=_+a)+Math.sqrt(v*v+_*_),t.circle=y;for(var g=null,m=fm._;m;)if(y.ydm)a=a.L;else{if(!((i=o-Gc(a,u))>dm)){r>-dm?(n=a.P,e=a):i>-dm?(n=a,e=a.N):n=e=a;break}if(!a.R){n=a;break}a=a.R}qc(t);var c=Xc(t);if(cm.insert(n,c),n||e){if(n===e)return jc(n),e=Xc(n.site),cm.insert(c,e),c.edge=e.edge=Ac(n.site,c.site),Bc(n),void Bc(e);if(e){jc(n),jc(e);var s=n.site,f=s[0],l=s[1],h=t[0]-f,p=t[1]-l,d=e.site,v=d[0]-f,_=d[1]-l,y=2*(h*_-p*v),g=h*h+p*p,m=v*v+_*_,x=[(_*g-p*m)/y+f,(h*m-v*g)/y+l];zc(e.edge,s,d,x),c.edge=Ac(s,t,null,x),e.edge=Ac(t,d,null,x),Bc(n),Bc(e)}else c.edge=Ac(n.site,c.site)}}function Zc(t,n){var e=t.site,r=e[0],i=e[1],o=i-n;if(!o)return r;var u=t.P;if(!u)return-1/0;var a=(e=u.site)[0],c=e[1],s=c-n;if(!s)return a;var f=a-r,l=1/o-1/s,h=f/s;return l?(-h+Math.sqrt(h*h-2*l*(f*f/(-2*s)-c+s/2+i-o/2)))/l+r:(r+a)/2}function Gc(t,n){var e=t.N;if(e)return Zc(e,n);var r=t.site;return r[1]===n?r[0]:1/0}function Jc(t,n,e){return(t[0]-e[0])*(n[1]-t[1])-(t[0]-n[0])*(e[1]-t[1])}function Qc(t,n){return n[1]-t[1]||n[0]-t[0]}function Kc(t,n){var e,r,i,o=t.sort(Qc).pop();for(lm=[],sm=new Array(t.length),cm=new Tc,fm=new Tc;;)if(i=am,o&&(!i||o[1]n?1:t>=n?0:NaN},fs=function(t){return 1===t.length&&(t=n(t)),{left:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r>>1;t(n[o],e)<0?r=o+1:i=o}return r},right:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r>>1;t(n[o],e)>0?i=o:r=o+1}return r}}},ls=fs(ss),hs=ls.right,ps=ls.left,ds=function(t){return null===t?NaN:+t},vs=function(t,n){var 
e,r,i=t.length,o=0,u=-1,a=0,c=0;if(null==n)for(;++u1)return c/(o-1)},_s=function(t,n){var e=vs(t,n);return e?Math.sqrt(e):e},ys=function(t,n){var e,r,i,o=t.length,u=-1;if(null==n){for(;++u=e)for(r=i=e;++ue&&(r=e),i=e)for(r=i=e;++ue&&(r=e),i0)for(t=Math.ceil(t/u),n=Math.floor(n/u),o=new Array(i=Math.ceil(n-t+1));++c=1)return+e(t[r-1],r-1,t);var r,i=(r-1)*n,o=Math.floor(i),u=+e(t[o],o,t);return u+(+e(t[o+1],o+1,t)-u)*(i-o)}},Cs=function(t){for(var n,e,r,i=t.length,o=-1,u=0;++o=0;)for(n=(r=t[i]).length;--n>=0;)e[--u]=r[n];return e},zs=function(t,n){var e,r,i=t.length,o=-1;if(null==n){for(;++o=e)for(r=e;++oe&&(r=e)}else for(;++o=e)for(r=e;++oe&&(r=e);return r},Ps=function(t){if(!(i=t.length))return[];for(var n=-1,e=zs(t,o),r=new Array(e);++n0)for(var e,r,i=new Array(e),o=0;o=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),Bs.hasOwnProperty(n)?{space:Bs[n],local:t}:t},Hs=function(t){var n=js(t);return(n.local?g:y)(n)},Xs=0;x.prototype=m.prototype={constructor:x,get:function(t){for(var n=this._;!(n in t);)if(!(t=t.parentNode))return;return t[n]},set:function(t,n){return t[this._]=n},remove:function(t){return this._ in t&&delete t[this._]},toString:function(){return this._}};var $s=function(t){return function(){return this.matches(t)}};if("undefined"!=typeof document){var Vs=document.documentElement;if(!Vs.matches){var Ws=Vs.webkitMatchesSelector||Vs.msMatchesSelector||Vs.mozMatchesSelector||Vs.oMatchesSelector;$s=function(t){return function(){return Ws.call(this,t)}}}}var Zs=$s,Gs={};t.event=null,"undefined"!=typeof document&&("onmouseenter"in document.documentElement||(Gs={mouseenter:"mouseover",mouseleave:"mouseout"}));var Js=function(){for(var n,e=t.event;n=e.sourceEvent;)e=n;return e},Qs=function(t,n){var e=t.ownerSVGElement||t;if(e.createSVGPoint){var r=e.createSVGPoint();return r.x=n.clientX,r.y=n.clientY,r=r.matrixTransform(t.getScreenCTM().inverse()),[r.x,r.y]}var i=t.getBoundingClientRect();return[n.clientX-i.left-t.clientLeft,n.clientY-i.top-t.clientTop]},Ks=function(t){var n=Js();return n.changedTouches&&(n=n.changedTouches[0]),Qs(t,n)},tf=function(t){return null==t?S:function(){return this.querySelector(t)}},nf=function(t){return null==t?E:function(){return this.querySelectorAll(t)}},ef=function(t){return new Array(t.length)};A.prototype={constructor:A,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var rf=function(t){return function(){return t}},of="$",uf=function(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView};W.prototype={add:function(t){this._names.indexOf(t)<0&&(this._names.push(t),this._node.setAttribute("class",this._names.join(" ")))},remove:function(t){var n=this._names.indexOf(t);n>=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var af=[null];pt.prototype=dt.prototype={constructor:pt,select:function(t){"function"!=typeof t&&(t=tf(t));for(var n=this._groups,e=n.length,r=new Array(e),i=0;i=x&&(x=m+1);!(g=_[x])&&++x=0;)(r=i[o])&&(u&&u!==r.nextSibling&&u.parentNode.insertBefore(r,u),u=r);return this},sort:function(t){t||(t=P);for(var n=this._groups,e=n.length,r=new Array(e),i=0;i1?this.each((null==n?F:"function"==typeof n?Y:I)(t,n,null==e?"":e)):B(this.node(),t)},property:function(t,n){return 
arguments.length>1?this.each((null==n?j:"function"==typeof n?X:H)(t,n)):this.node()[t]},classed:function(t,n){var e=$(t+"");if(arguments.length<2){for(var r=V(this.node()),i=-1,o=e.length;++i=240?t-240:t+120,i,r),Lt(t,i,r),Lt(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1}}));var Nf=Math.PI/180,Sf=180/Math.PI,Ef=.95047,Af=1,Cf=1.08883,zf=4/29,Pf=6/29,Rf=3*Pf*Pf,Lf=Pf*Pf*Pf;pf(Dt,Ut,wt(Mt,{brighter:function(t){return new Dt(this.l+18*(null==t?1:t),this.a,this.b,this.opacity)},darker:function(t){return new Dt(this.l-18*(null==t?1:t),this.a,this.b,this.opacity)},rgb:function(){var t=(this.l+16)/116,n=isNaN(this.a)?t:t+this.a/500,e=isNaN(this.b)?t:t-this.b/200;return t=Af*Ft(t),n=Ef*Ft(n),e=Cf*Ft(e),new At(It(3.2404542*n-1.5371385*t-.4985314*e),It(-.969266*n+1.8760108*t+.041556*e),It(.0556434*n-.2040259*t+1.0572252*e),this.opacity)}})),pf(Ht,jt,wt(Mt,{brighter:function(t){return new Ht(this.h,this.c,this.l+18*(null==t?1:t),this.opacity)},darker:function(t){return new Ht(this.h,this.c,this.l-18*(null==t?1:t),this.opacity)},rgb:function(){return qt(this).rgb()}}));var qf=-.14861,Uf=1.78277,Df=-.29227,Of=-.90649,Ff=1.97294,If=Ff*Of,Yf=Ff*Uf,Bf=Uf*Df-Of*qf;pf(Vt,$t,wt(Mt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new Vt(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new Vt(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=isNaN(this.h)?0:(this.h+120)*Nf,n=+this.l,e=isNaN(this.s)?0:this.s*n*(1-n),r=Math.cos(t),i=Math.sin(t);return new At(255*(n+e*(qf*r+Uf*i)),255*(n+e*(Df*r+Of*i)),255*(n+e*(Ff*r)),this.opacity)}}));var jf,Hf,Xf,$f,Vf,Wf,Zf=function(t){var n=t.length-1;return function(e){var r=e<=0?e=0:e>=1?(e=1,n-1):Math.floor(e*n),i=t[r],o=t[r+1],u=r>0?t[r-1]:2*i-o,a=ro&&(i=n.slice(o,i),a[u]?a[u]+=i:a[++u]=i),(e=e[0])===(r=r[0])?a[u]?a[u]+=r:a[++u]=r:(a[++u]=null,c.push({i:u,x:rl(e,r)})),o=ul.lastIndex;return oDl&&e.state1e-6)if(Math.abs(f*a-c*s)>1e-6&&i){var h=e-o,p=r-u,d=a*a+c*c,v=h*h+p*p,_=Math.sqrt(d),y=Math.sqrt(l),g=i*Math.tan((Yh-Math.acos((d+l-v)/(2*_*y)))/2),m=g/y,x=g/_;Math.abs(m-1)>1e-6&&(this._+="L"+(t+m*s)+","+(n+m*f)),this._+="A"+i+","+i+",0,0,"+ +(f*h>s*p)+","+(this._x1=t+x*a)+","+(this._y1=n+x*c)}else this._+="L"+(this._x1=t)+","+(this._y1=n);else;},arc:function(t,n,e,r,i,o){t=+t,n=+n;var u=(e=+e)*Math.cos(r),a=e*Math.sin(r),c=t+u,s=n+a,f=1^o,l=o?r-i:i-r;if(e<0)throw new Error("negative radius: "+e);null===this._x1?this._+="M"+c+","+s:(Math.abs(this._x1-c)>1e-6||Math.abs(this._y1-s)>1e-6)&&(this._+="L"+c+","+s),e&&(l<0&&(l=l%Bh+Bh),l>jh?this._+="A"+e+","+e+",0,1,"+f+","+(t-u)+","+(n-a)+"A"+e+","+e+",0,1,"+f+","+(this._x1=c)+","+(this._y1=s):l>1e-6&&(this._+="A"+e+","+e+",0,"+ +(l>=Yh)+","+f+","+(this._x1=t+e*Math.cos(i))+","+(this._y1=n+e*Math.sin(i))))},rect:function(t,n,e,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+n)+"h"+ +e+"v"+ +r+"h"+-e+"Z"},toString:function(){return this._}};be.prototype=we.prototype={constructor:be,has:function(t){return"$"+t in this},get:function(t){return this["$"+t]},set:function(t,n){return this["$"+t]=n,this},remove:function(t){var n="$"+t;return n in this&&delete this[n]},clear:function(){for(var t in this)"$"===t[0]&&delete this[t]},keys:function(){var t=[];for(var n in this)"$"===n[0]&&t.push(n.slice(1));return t},values:function(){var t=[];for(var n in this)"$"===n[0]&&t.push(this[n]);return t},entries:function(){var t=[];for(var n in 
this)"$"===n[0]&&t.push({key:n.slice(1),value:this[n]});return t},size:function(){var t=0;for(var n in this)"$"===n[0]&&++t;return t},empty:function(){for(var t in this)if("$"===t[0])return!1;return!0},each:function(t){for(var n in this)"$"===n[0]&&t(this[n],n.slice(1),this)}};var Hh=we.prototype;Se.prototype=Ee.prototype={constructor:Se,has:Hh.has,add:function(t){return t+="",this["$"+t]=t,this},remove:Hh.remove,clear:Hh.clear,values:Hh.keys,size:Hh.size,empty:Hh.empty,each:Hh.each};var Xh={},$h={},Vh=34,Wh=10,Zh=13,Gh=function(t){function n(t,n){function e(){if(s)return $h;if(f)return f=!1,Xh;var n,e,r=a;if(t.charCodeAt(r)===Vh){for(;a++=u?s=!0:(e=t.charCodeAt(a++))===Wh?f=!0:e===Zh&&(f=!0,t.charCodeAt(a)===Wh&&++a),t.slice(r+1,n-1).replace(/""/g,'"')}for(;af&&(f=r),il&&(l=i));for(ft||t>i||r>n||n>o))return this;var u,a,c=i-e,s=this._root;switch(a=(n<(r+o)/2)<<1|t<(e+i)/2){case 0:do{u=new Array(4),u[a]=s,s=u}while(c*=2,i=e+c,o=r+c,t>i||n>o);break;case 1:do{u=new Array(4),u[a]=s,s=u}while(c*=2,e=i-c,o=r+c,e>t||n>o);break;case 2:do{u=new Array(4),u[a]=s,s=u}while(c*=2,i=e+c,r=o-c,t>i||r>n);break;case 3:do{u=new Array(4),u[a]=s,s=u}while(c*=2,e=i-c,r=o-c,e>t||r>n)}this._root&&this._root.length&&(this._root=s)}return this._x0=e,this._y0=r,this._x1=i,this._y1=o,this},fp.data=function(){var t=[];return this.visit(function(n){if(!n.length)do{t.push(n.data)}while(n=n.next)}),t},fp.extent=function(t){return arguments.length?this.cover(+t[0][0],+t[0][1]).cover(+t[1][0],+t[1][1]):isNaN(this._x0)?void 0:[[this._x0,this._y0],[this._x1,this._y1]]},fp.find=function(t,n,e){var r,i,o,u,a,c,s,f=this._x0,l=this._y0,h=this._x1,p=this._y1,d=[],v=this._root;for(v&&d.push(new sp(v,f,l,h,p)),null==e?e=1/0:(f=t-e,l=n-e,h=t+e,p=n+e,e*=e);c=d.pop();)if(!(!(v=c.node)||(i=c.x0)>h||(o=c.y0)>p||(u=c.x1)=y)<<1|t>=_)&&(c=d[d.length-1],d[d.length-1]=d[d.length-1-s],d[d.length-1-s]=c)}else{var g=t-+this._x.call(null,v.data),m=n-+this._y.call(null,v.data),x=g*g+m*m;if(x=(a=(d+_)/2))?d=a:_=a,(f=u>=(c=(v+y)/2))?v=c:y=c,n=p,!(p=p[l=f<<1|s]))return this;if(!p.length)break;(n[l+1&3]||n[l+2&3]||n[l+3&3])&&(e=n,h=l)}for(;p.data!==t;)if(r=p,!(p=p.next))return this;return(i=p.next)&&delete p.next,r?(i?r.next=i:delete r.next,this):n?(i?n[l]=i:delete n[l],(p=n[0]||n[1]||n[2]||n[3])&&p===(n[3]||n[2]||n[1]||n[0])&&!p.length&&(e?e[h]=p:this._root=p),this):(this._root=i,this)},fp.removeAll=function(t){for(var n=0,e=t.length;n1?r[0]+r.slice(2):r,+t.slice(e+1)]},vp=function(t){return(t=dp(Math.abs(t)))?t[1]:NaN},_p=function(t,n){return function(e,r){for(var i=e.length,o=[],u=0,a=t[0],c=0;i>0&&a>0&&(c+a+1>r&&(a=Math.max(1,r-c)),o.push(e.substring(i-=a,i+a)),!((c+=a+1)>r));)a=t[u=(u+1)%t.length];return o.reverse().join(n)}},yp=function(t){return function(n){return n.replace(/[0-9]/g,function(n){return t[+n]})}},gp=function(t,n){var e=dp(t,n);if(!e)return t+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")},mp={"":function(t,n){t:for(var e,r=(t=t.toPrecision(n)).length,i=1,o=-1;i0&&(o=0)}return o>0?t.slice(0,o)+t.slice(e+1):t},"%":function(t,n){return(100*t).toFixed(n)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.round(t).toString(10)},e:function(t,n){return t.toExponential(n)},f:function(t,n){return t.toFixed(n)},g:function(t,n){return t.toPrecision(n)},o:function(t){return Math.round(t).toString(8)},p:function(t,n){return gp(100*t,n)},r:gp,s:function(t,n){var e=dp(t,n);if(!e)return t+"";var 
r=e[0],i=e[1],o=i-(lp=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,u=r.length;return o===u?r:o>u?r+new Array(o-u+1).join("0"):o>0?r.slice(0,o)+"."+r.slice(o):"0."+new Array(1-o).join("0")+dp(t,Math.max(0,n+o-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}},xp=/^(?:(.)?([<>=^]))?([+\-\( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?([a-z%])?$/i;He.prototype=Xe.prototype,Xe.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(null==this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(null==this.precision?"":"."+Math.max(0,0|this.precision))+this.type};var bp,wp=function(t){return t},Mp=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"],Tp=function(t){function n(t){function n(t){var n,r,u,f=_,x=y;if("c"===v)x=g(t)+x,t="";else{var b=(t=+t)<0;if(t=g(Math.abs(t),d),b&&0==+t&&(b=!1),f=(b?"("===s?s:"-":"-"===s||"("===s?"":s)+f,x=x+("s"===v?Mp[8+lp/3]:"")+(b&&"("===s?")":""),m)for(n=-1,r=t.length;++n(u=t.charCodeAt(n))||u>57){x=(46===u?i+t.slice(n+1):t.slice(n))+x,t=t.slice(0,n);break}}p&&!l&&(t=e(t,1/0));var w=f.length+t.length+x.length,M=w>1)+f+t+x+M.slice(w);break;default:t=M+f+t+x}return o(t)}var a=(t=He(t)).fill,c=t.align,s=t.sign,f=t.symbol,l=t.zero,h=t.width,p=t.comma,d=t.precision,v=t.type,_="$"===f?r[0]:"#"===f&&/[boxX]/.test(v)?"0"+v.toLowerCase():"",y="$"===f?r[1]:/[%p]/.test(v)?u:"",g=mp[v],m=!v||/[defgprs%]/.test(v);return d=null==d?v?6:12:/[gprs]/.test(v)?Math.max(1,Math.min(21,d)):Math.max(0,Math.min(20,d)),n.toString=function(){return t+""},n}var e=t.grouping&&t.thousands?_p(t.grouping,t.thousands):wp,r=t.currency,i=t.decimal,o=t.numerals?yp(t.numerals):wp,u=t.percent||"%";return{format:n,formatPrefix:function(t,e){var r=n((t=He(t),t.type="f",t)),i=3*Math.max(-8,Math.min(8,Math.floor(vp(e)/3))),o=Math.pow(10,-i),u=Mp[8+i/3];return function(t){return r(o*t)+u}}}};$e({decimal:".",thousands:",",grouping:[3],currency:["$",""]});var kp=function(t){return Math.max(0,-vp(Math.abs(t)))},Np=function(t,n){return Math.max(0,3*Math.max(-8,Math.min(8,Math.floor(vp(n)/3)))-vp(Math.abs(t)))},Sp=function(t,n){return t=Math.abs(t),n=Math.abs(n)-t,Math.max(0,vp(n)-vp(t))+1},Ep=function(){return new Ve};Ve.prototype={constructor:Ve,reset:function(){this.s=this.t=0},add:function(t){We(ud,t,this.t),We(this,ud.s,this.s),this.s?this.t+=ud.t:this.s=ud.t},valueOf:function(){return this.s}};var Ap,Cp,zp,Pp,Rp,Lp,qp,Up,Dp,Op,Fp,Ip,Yp,Bp,jp,Hp,Xp,$p,Vp,Wp,Zp,Gp,Jp,Qp,Kp,td,nd,ed,rd,id,od,ud=new Ve,ad=1e-6,cd=Math.PI,sd=cd/2,fd=cd/4,ld=2*cd,hd=180/cd,pd=cd/180,dd=Math.abs,vd=Math.atan,_d=Math.atan2,yd=Math.cos,gd=Math.ceil,md=Math.exp,xd=Math.log,bd=Math.pow,wd=Math.sin,Md=Math.sign||function(t){return t>0?1:t<0?-1:0},Td=Math.sqrt,kd=Math.tan,Nd={Feature:function(t,n){Ke(t.geometry,n)},FeatureCollection:function(t,n){for(var e=t.features,r=-1,i=e.length;++rad?Dp=90:Pd<-ad&&(qp=-90),jp[0]=Lp,jp[1]=Up}},Ld={sphere:Qe,point:Mr,lineStart:kr,lineEnd:Er,polygonStart:function(){Ld.lineStart=Ar,Ld.lineEnd=Cr},polygonEnd:function(){Ld.lineStart=kr,Ld.lineEnd=Er}},qd=function(t){return function(){return t}},Ud=function(t,n){function e(e,r){return e=t(e,r),n(e[0],e[1])}return t.invert&&n.invert&&(e.invert=function(e,r){return(e=n.invert(e,r))&&t.invert(e[0],e[1])}),e};Rr.invert=Rr;var Dd,Od,Fd,Id,Yd,Bd,jd,Hd,Xd,$d,Vd,Wd=function(t){function n(n){return n=t(n[0]*pd,n[1]*pd),n[0]*=hd,n[1]*=hd,n}return t=Lr(t[0]*pd,t[1]*pd,t.length>2?t[2]*pd:0),n.invert=function(n){return 
n=t.invert(n[0]*pd,n[1]*pd),n[0]*=hd,n[1]*=hd,n},n},Zd=function(){var t,n=[];return{point:function(n,e){t.push([n,e])},lineStart:function(){n.push(t=[])},lineEnd:Qe,rejoin:function(){n.length>1&&n.push(n.pop().concat(n.shift()))},result:function(){var e=n;return n=[],t=null,e}}},Gd=function(t,n,e,r,i,o){var u,a=t[0],c=t[1],s=0,f=1,l=n[0]-a,h=n[1]-c;if(u=e-a,l||!(u>0)){if(u/=l,l<0){if(u0){if(u>f)return;u>s&&(s=u)}if(u=i-a,l||!(u<0)){if(u/=l,l<0){if(u>f)return;u>s&&(s=u)}else if(l>0){if(u0)){if(u/=h,h<0){if(u0){if(u>f)return;u>s&&(s=u)}if(u=o-c,h||!(u<0)){if(u/=h,h<0){if(u>f)return;u>s&&(s=u)}else if(h>0){if(u0&&(t[0]=a+s*l,t[1]=c+s*h),f<1&&(n[0]=a+f*l,n[1]=c+f*h),!0}}}}},Jd=function(t,n){return dd(t[0]-n[0])=0;--o)i.point((f=s[o])[0],f[1]);else r(h.x,h.p.x,-1,i);h=h.p}s=(h=h.o).z,p=!p}while(!h.v);i.lineEnd()}}},Kd=1e9,tv=-Kd,nv=Ep(),ev=function(t,n){var e=n[0],r=n[1],i=[wd(e),-yd(e),0],o=0,u=0;nv.reset();for(var a=0,c=t.length;a=0?1:-1,T=M*w,k=T>cd,N=d*x;if(nv.add(_d(N*M*wd(T),v*b+N*yd(T))),o+=k?w+M*ld:w,k^h>=e^g>=e){var S=sr(ar(l),ar(y));hr(S);var E=sr(i,S);hr(E);var A=(k^w>=0?-1:1)*Ge(E[2]);(r>A||r===A&&(S[0]||S[1]))&&(u+=k^w>=0?1:-1)}}return(o<-ad||oyv&&(yv=t),n<_v&&(_v=n),n>gv&&(gv=n)},lineStart:Qe,lineEnd:Qe,polygonStart:Qe,polygonEnd:Qe,result:function(){var t=[[vv,_v],[yv,gv]];return yv=gv=-(_v=vv=1/0),t}},xv=0,bv=0,wv=0,Mv=0,Tv=0,kv=0,Nv=0,Sv=0,Ev=0,Av={point:oi,lineStart:ui,lineEnd:si,polygonStart:function(){Av.lineStart=fi,Av.lineEnd=li},polygonEnd:function(){Av.point=oi,Av.lineStart=ui,Av.lineEnd=si},result:function(){var t=Ev?[Nv/Ev,Sv/Ev]:kv?[Mv/kv,Tv/kv]:wv?[xv/wv,bv/wv]:[NaN,NaN];return xv=bv=wv=Mv=Tv=kv=Nv=Sv=Ev=0,t}};di.prototype={_radius:4.5,pointRadius:function(t){return this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,n){switch(this._point){case 0:this._context.moveTo(t,n),this._point=1;break;case 1:this._context.lineTo(t,n);break;default:this._context.moveTo(t+this._radius,n),this._context.arc(t,n,this._radius,0,ld)}},result:Qe};var Cv,zv,Pv,Rv,Lv,qv=Ep(),Uv={point:Qe,lineStart:function(){Uv.point=vi},lineEnd:function(){Cv&&_i(zv,Pv),Uv.point=Qe},polygonStart:function(){Cv=!0},polygonEnd:function(){Cv=null},result:function(){var t=+qv;return qv.reset(),t}};yi.prototype={_radius:4.5,_circle:gi(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,n){switch(this._point){case 0:this._string.push("M",t,",",n),this._point=1;break;case 1:this._string.push("L",t,",",n);break;default:null==this._circle&&(this._circle=gi(this._radius)),this._string.push("M",t,",",n,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}};var Dv=function(t,n,e,r){return function(i,o){function u(n,e){var r=i(n,e);t(n=r[0],e=r[1])&&o.point(n,e)}function a(t,n){var e=i(t,n);_.point(e[0],e[1])}function c(){b.point=a,_.lineStart()}function s(){b.point=u,_.lineEnd()}function f(t,n){v.push([t,n]);var e=i(t,n);m.point(e[0],e[1])}function l(){m.lineStart(),v=[]}function h(){f(v[0][0],v[0][1]),m.lineEnd();var 
t,n,e,r,i=m.clean(),u=g.result(),a=u.length;if(v.pop(),p.push(v),v=null,a)if(1&i){if(e=u[0],(n=e.length-1)>0){for(x||(o.polygonStart(),x=!0),o.lineStart(),t=0;t1&&2&i&&u.push(u.pop().concat(u.shift())),d.push(u.filter(mi))}var p,d,v,_=n(o),y=i.invert(r[0],r[1]),g=Zd(),m=n(g),x=!1,b={point:u,lineStart:c,lineEnd:s,polygonStart:function(){b.point=f,b.lineStart=l,b.lineEnd=h,d=[],p=[]},polygonEnd:function(){b.point=u,b.lineStart=c,b.lineEnd=s,d=Cs(d);var t=ev(p,y);d.length?(x||(o.polygonStart(),x=!0),Qd(d,xi,t,e,o)):t&&(x||(o.polygonStart(),x=!0),o.lineStart(),e(null,null,1,o),o.lineEnd()),x&&(o.polygonEnd(),x=!1),d=p=null},sphere:function(){o.polygonStart(),o.lineStart(),e(null,null,1,o),o.lineEnd(),o.polygonEnd()}};return b}},Ov=Dv(function(){return!0},function(t){var n,e=NaN,r=NaN,i=NaN;return{lineStart:function(){t.lineStart(),n=1},point:function(o,u){var a=o>0?cd:-cd,c=dd(o-e);dd(c-cd)0?sd:-sd),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(a,r),t.point(o,r),n=0):i!==a&&c>=cd&&(dd(e-i)ad){var o=t[0]o}function r(t,n,e){var r=[1,0,0],i=sr(ar(t),ar(n)),u=cr(i,i),a=i[0],c=u-a*a;if(!c)return!e&&t;var s=o*u/c,f=-o*a/c,l=sr(r,i),h=lr(r,s);fr(h,lr(i,f));var p=l,d=cr(h,p),v=cr(p,p),_=d*d-v*(cr(h,h)-1);if(!(_<0)){var y=Td(_),g=lr(p,(-d-y)/v);if(fr(g,h),g=ur(g),!e)return g;var m,x=t[0],b=n[0],w=t[1],M=n[1];b0^g[1]<(dd(g[0]-x)cd^(x<=g[0]&&g[0]<=b)){var S=lr(p,(-d+y)/v);return fr(S,h),[g,ur(S)]}}}function i(n,e){var r=u?t:cd-t,i=0;return n<-r?i|=1:n>r&&(i|=2),e<-r?i|=4:e>r&&(i|=8),i}var o=yd(t),u=o>0,a=dd(o)>ad;return Dv(e,function(t){var n,o,c,s,f;return{lineStart:function(){s=c=!1,f=1},point:function(l,h){var p,d=[l,h],v=e(l,h),_=u?v?0:i(l,h):v?i(l+(l<0?cd:-cd),h):0;if(!n&&(s=c=v)&&t.lineStart(),v!==c&&(!(p=r(n,d))||Jd(n,p)||Jd(d,p))&&(d[0]+=ad,d[1]+=ad,v=e(d[0],d[1])),v!==c)f=0,v?(t.lineStart(),p=r(d,n),t.point(p[0],p[1])):(p=r(n,d),t.point(p[0],p[1]),t.lineEnd()),n=p;else if(a&&n&&u^v){var y;_&o||!(y=r(d,n,!0))||(f=0,u?(t.lineStart(),t.point(y[0][0],y[0][1]),t.point(y[1][0],y[1][1]),t.lineEnd()):(t.point(y[1][0],y[1][1]),t.lineEnd(),t.lineStart(),t.point(y[0][0],y[0][1])))}!v||n&&Jd(n,d)||t.point(d[0],d[1]),n=d,c=v,o=_},lineEnd:function(){c&&t.lineEnd(),n=null},clean:function(){return f|(s&&c)<<1}}},function(e,r,i,o){Or(o,t,n,i,e,r)},u?[0,-t]:[-cd,t-cd])};Mi.prototype={constructor:Mi,point:function(t,n){this.stream.point(t,n)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var Iv=16,Yv=yd(30*pd),Bv=function(t,n){return+n?Si(t,n):Ni(t)},jv=wi({point:function(t,n){this.stream.point(t*pd,n*pd)}}),Hv=function(){return Ci(Pi).scale(155.424).center([0,33.6442])},Xv=function(){return Hv().parallels([29.5,45.5]).scale(1070).translate([480,250]).rotate([96,0]).center([-.6,38.7])},$v=Li(function(t){return Td(2/(1+t))});$v.invert=qi(function(t){return 2*Ge(t/2)});var Vv=Li(function(t){return(t=Ze(t))&&t/wd(t)});Vv.invert=qi(function(t){return t});Ui.invert=function(t,n){return[t,2*vd(md(n))-sd]};Ii.invert=Ii;Bi.invert=qi(vd);Hi.invert=qi(Ge);Xi.invert=qi(function(t){return 2*vd(t)});$i.invert=function(t,n){return[-n,2*vd(md(t))-sd]};uo.prototype=eo.prototype={constructor:uo,count:function(){return this.eachAfter(to)},each:function(t){var n,e,r,i,o=this,u=[o];do{for(n=u.reverse(),u=[];o=n.pop();)if(t(o),e=o.children)for(r=0,i=e.length;r=0;--e)i.push(n[e]);return this},sum:function(t){return this.eachAfter(function(n){for(var 
e=+t(n.data)||0,r=n.children,i=r&&r.length;--i>=0;)e+=r[i].value;n.value=e})},sort:function(t){return this.eachBefore(function(n){n.children&&n.children.sort(t)})},path:function(t){for(var n=this,e=no(n,t),r=[n];n!==e;)n=n.parent,r.push(n);for(var i=r.length;t!==e;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,n=[t];t=t.parent;)n.push(t);return n},descendants:function(){var t=[];return this.each(function(n){t.push(n)}),t},leaves:function(){var t=[];return this.eachBefore(function(n){n.children||t.push(n)}),t},links:function(){var t=this,n=[];return t.each(function(e){e!==t&&n.push({source:e.parent,target:e})}),n},copy:function(){return eo(this).eachBefore(io)}};var Wv=Array.prototype.slice,Zv=function(t){for(var n,e,r=0,i=(t=ao(Wv.call(t))).length,o=[];r1?n:1)},e}(r_),o_=function t(n){function e(t,e,r,i,o){if((u=t._squarify)&&u.ratio===n)for(var u,a,c,s,f,l=-1,h=u.length,p=t.value;++l1?n:1)},e}(r_),u_=function(t,n,e){return(n[0]-t[0])*(e[1]-t[1])-(n[1]-t[1])*(e[0]-t[0])},a_=[].slice,c_={};Bo.prototype=Wo.prototype={constructor:Bo,defer:function(t){if("function"!=typeof t)throw new Error("invalid callback");if(this._call)throw new Error("defer after await");if(null!=this._error)return this;var n=a_.call(arguments,1);return n.push(t),++this._waiting,this._tasks.push(n),jo(this),this},abort:function(){return null==this._error&&$o(this,new Error("abort")),this},await:function(t){if("function"!=typeof t)throw new Error("invalid callback");if(this._call)throw new Error("multiple await");return this._call=function(n,e){t.apply(null,[n].concat(e))},Vo(this),this},awaitAll:function(t){if("function"!=typeof t)throw new Error("invalid callback");if(this._call)throw new Error("multiple await");return this._call=t,Vo(this),this}};var s_=function(){return Math.random()},f_=function t(n){function e(t,e){return t=null==t?0:+t,e=null==e?1:+e,1===arguments.length?(e=t,t=0):e-=t,function(){return n()*e+t}}return e.source=t,e}(s_),l_=function t(n){function e(t,e){var r,i;return t=null==t?0:+t,e=null==e?1:+e,function(){var o;if(null!=r)o=r,r=null;else do{r=2*n()-1,o=2*n()-1,i=r*r+o*o}while(!i||i>1);return t+e*o*Math.sqrt(-2*Math.log(i)/i)}}return e.source=t,e}(s_),h_=function t(n){function e(){var t=l_.source(n).apply(this,arguments);return function(){return Math.exp(t())}}return e.source=t,e}(s_),p_=function t(n){function e(t){return function(){for(var e=0,r=0;r=200&&e<300||304===e){if(o)try{n=o.call(r,s)}catch(t){return void a.call("error",r,t)}else n=s;a.call("load",r,n)}else a.call("error",r,t)}var r,i,o,u,a=h("beforesend","progress","load","error"),c=we(),s=new XMLHttpRequest,f=null,l=null,p=0;if("undefined"==typeof XDomainRequest||"withCredentials"in s||!/^(http(s)?:)?\/\//.test(t)||(s=new XDomainRequest),"onload"in s?s.onload=s.onerror=s.ontimeout=e:s.onreadystatechange=function(t){s.readyState>3&&e(t)},s.onprogress=function(t){a.call("progress",r,t)},r={header:function(t,n){return t=(t+"").toLowerCase(),arguments.length<2?c.get(t):(null==n?c.remove(t):c.set(t,n+""),r)},mimeType:function(t){return arguments.length?(i=null==t?null:t+"",r):i},responseType:function(t){return arguments.length?(u=t,r):u},timeout:function(t){return arguments.length?(p=+t,r):p},user:function(t){return arguments.length<1?f:(f=null==t?null:t+"",r)},password:function(t){return arguments.length<1?l:(l=null==t?null:t+"",r)},response:function(t){return o=t,r},get:function(t,n){return r.send("GET",t,n)},post:function(t,n){return r.send("POST",t,n)},send:function(n,e,o){return 
s.open(n,t,!0,f,l),null==i||c.has("accept")||c.set("accept",i+",*/*"),s.setRequestHeader&&c.each(function(t,n){s.setRequestHeader(n,t)}),null!=i&&s.overrideMimeType&&s.overrideMimeType(i),null!=u&&(s.responseType=u),p>0&&(s.timeout=p),null==o&&"function"==typeof e&&(o=e,e=null),null!=o&&1===o.length&&(o=Zo(o)),null!=o&&r.on("error",o).on("load",function(t){o(null,t)}),a.call("beforesend",r,s),s.send(null==e?null:e),r},abort:function(){return s.abort(),r},on:function(){var t=a.on.apply(a,arguments);return t===a?r:t}},null!=n){if("function"!=typeof n)throw new Error("invalid callback: "+n);return r.get(n)}return r},y_=function(t,n){return function(e,r){var i=__(e).mimeType(t).response(n);if(null!=r){if("function"!=typeof r)throw new Error("invalid callback: "+r);return i.get(r)}return i}},g_=y_("text/html",function(t){return document.createRange().createContextualFragment(t.responseText)}),m_=y_("application/json",function(t){return JSON.parse(t.responseText)}),x_=y_("text/plain",function(t){return t.responseText}),b_=y_("application/xml",function(t){var n=t.responseXML;if(!n)throw new Error("parse error");return n}),w_=function(t,n){return function(e,r,i){arguments.length<3&&(i=r,r=null);var o=__(e).mimeType(t);return o.row=function(t){return arguments.length?o.response(Jo(n,r=t)):r},o.row(r),i?o.get(i):o}},M_=w_("text/csv",Qh),T_=w_("text/tab-separated-values",rp),k_=Array.prototype,N_=k_.map,S_=k_.slice,E_={name:"implicit"},A_=function(t){return function(){return t}},C_=function(t){return+t},z_=[0,1],P_=function(n,e,r){var o,u=n[0],a=n[n.length-1],c=i(u,a,null==e?10:e);switch((r=He(null==r?",f":r)).type){case"s":var s=Math.max(Math.abs(u),Math.abs(a));return null!=r.precision||isNaN(o=Np(c,s))||(r.precision=o),t.formatPrefix(r,s);case"":case"e":case"g":case"p":case"r":null!=r.precision||isNaN(o=Sp(c,Math.max(Math.abs(u),Math.abs(a))))||(r.precision=o-("e"===r.type));break;case"f":case"%":null!=r.precision||isNaN(o=kp(c))||(r.precision=o-2*("%"===r.type))}return t.format(r)},R_=function(t,n){var e,r=0,i=(t=t.slice()).length-1,o=t[r],u=t[i];return u0?t>1?Mu(function(n){n.setTime(Math.floor(n/t)*t)},function(n,e){n.setTime(+n+e*t)},function(n,e){return(e-n)/t}):U_:null};var D_=U_.range,O_=6e4,F_=6048e5,I_=Mu(function(t){t.setTime(1e3*Math.floor(t/1e3))},function(t,n){t.setTime(+t+1e3*n)},function(t,n){return(n-t)/1e3},function(t){return t.getUTCSeconds()}),Y_=I_.range,B_=Mu(function(t){t.setTime(Math.floor(t/O_)*O_)},function(t,n){t.setTime(+t+n*O_)},function(t,n){return(n-t)/O_},function(t){return t.getMinutes()}),j_=B_.range,H_=Mu(function(t){var n=t.getTimezoneOffset()*O_%36e5;n<0&&(n+=36e5),t.setTime(36e5*Math.floor((+t-n)/36e5)+n)},function(t,n){t.setTime(+t+36e5*n)},function(t,n){return(n-t)/36e5},function(t){return t.getHours()}),X_=H_.range,$_=Mu(function(t){t.setHours(0,0,0,0)},function(t,n){t.setDate(t.getDate()+n)},function(t,n){return(n-t-(n.getTimezoneOffset()-t.getTimezoneOffset())*O_)/864e5},function(t){return t.getDate()-1}),V_=$_.range,W_=Tu(0),Z_=Tu(1),G_=Tu(2),J_=Tu(3),Q_=Tu(4),K_=Tu(5),ty=Tu(6),ny=W_.range,ey=Z_.range,ry=G_.range,iy=J_.range,oy=Q_.range,uy=K_.range,ay=ty.range,cy=Mu(function(t){t.setDate(1),t.setHours(0,0,0,0)},function(t,n){t.setMonth(t.getMonth()+n)},function(t,n){return n.getMonth()-t.getMonth()+12*(n.getFullYear()-t.getFullYear())},function(t){return t.getMonth()}),sy=cy.range,fy=Mu(function(t){t.setMonth(0,1),t.setHours(0,0,0,0)},function(t,n){t.setFullYear(t.getFullYear()+n)},function(t,n){return n.getFullYear()-t.getFullYear()},function(t){return 
t.getFullYear()});fy.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Mu(function(n){n.setFullYear(Math.floor(n.getFullYear()/t)*t),n.setMonth(0,1),n.setHours(0,0,0,0)},function(n,e){n.setFullYear(n.getFullYear()+e*t)}):null};var ly=fy.range,hy=Mu(function(t){t.setUTCSeconds(0,0)},function(t,n){t.setTime(+t+n*O_)},function(t,n){return(n-t)/O_},function(t){return t.getUTCMinutes()}),py=hy.range,dy=Mu(function(t){t.setUTCMinutes(0,0,0)},function(t,n){t.setTime(+t+36e5*n)},function(t,n){return(n-t)/36e5},function(t){return t.getUTCHours()}),vy=dy.range,_y=Mu(function(t){t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCDate(t.getUTCDate()+n)},function(t,n){return(n-t)/864e5},function(t){return t.getUTCDate()-1}),yy=_y.range,gy=ku(0),my=ku(1),xy=ku(2),by=ku(3),wy=ku(4),My=ku(5),Ty=ku(6),ky=gy.range,Ny=my.range,Sy=xy.range,Ey=by.range,Ay=wy.range,Cy=My.range,zy=Ty.range,Py=Mu(function(t){t.setUTCDate(1),t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCMonth(t.getUTCMonth()+n)},function(t,n){return n.getUTCMonth()-t.getUTCMonth()+12*(n.getUTCFullYear()-t.getUTCFullYear())},function(t){return t.getUTCMonth()}),Ry=Py.range,Ly=Mu(function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCFullYear(t.getUTCFullYear()+n)},function(t,n){return n.getUTCFullYear()-t.getUTCFullYear()},function(t){return t.getUTCFullYear()});Ly.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Mu(function(n){n.setUTCFullYear(Math.floor(n.getUTCFullYear()/t)*t),n.setUTCMonth(0,1),n.setUTCHours(0,0,0,0)},function(n,e){n.setUTCFullYear(n.getUTCFullYear()+e*t)}):null};var qy,Uy=Ly.range,Dy={"-":"",_:" ",0:"0"},Oy=/^\s*\d+/,Fy=/^%/,Iy=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g;Ma({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});var Yy=Date.prototype.toISOString?function(t){return t.toISOString()}:t.utcFormat("%Y-%m-%dT%H:%M:%S.%LZ"),By=+new Date("2000-01-01T00:00:00.000Z")?function(t){var n=new Date(t);return isNaN(n)?null:n}:t.utcParse("%Y-%m-%dT%H:%M:%S.%LZ"),jy=1e3,Hy=60*jy,Xy=60*Hy,$y=24*Xy,Vy=7*$y,Wy=30*$y,Zy=365*$y,Gy=function(t){return 
t.match(/.{6}/g).map(function(t){return"#"+t})},Jy=Gy("1f77b4ff7f0e2ca02cd627289467bd8c564be377c27f7f7fbcbd2217becf"),Qy=Gy("393b795254a36b6ecf9c9ede6379398ca252b5cf6bcedb9c8c6d31bd9e39e7ba52e7cb94843c39ad494ad6616be7969c7b4173a55194ce6dbdde9ed6"),Ky=Gy("3182bd6baed69ecae1c6dbefe6550dfd8d3cfdae6bfdd0a231a35474c476a1d99bc7e9c0756bb19e9ac8bcbddcdadaeb636363969696bdbdbdd9d9d9"),tg=Gy("1f77b4aec7e8ff7f0effbb782ca02c98df8ad62728ff98969467bdc5b0d58c564bc49c94e377c2f7b6d27f7f7fc7c7c7bcbd22dbdb8d17becf9edae5"),ng=wl($t(300,.5,0),$t(-240,.5,1)),eg=wl($t(-100,.75,.35),$t(80,1.5,.8)),rg=wl($t(260,.75,.35),$t(80,1.5,.8)),ig=$t(),og=Sa(Gy("44015444025645045745055946075a46085c460a5d460b5e470d60470e6147106347116447136548146748166848176948186a481a6c481b6d481c6e481d6f481f70482071482173482374482475482576482677482878482979472a7a472c7a472d7b472e7c472f7d46307e46327e46337f463480453581453781453882443983443a83443b84433d84433e85423f854240864241864142874144874045884046883f47883f48893e49893e4a893e4c8a3d4d8a3d4e8a3c4f8a3c508b3b518b3b528b3a538b3a548c39558c39568c38588c38598c375a8c375b8d365c8d365d8d355e8d355f8d34608d34618d33628d33638d32648e32658e31668e31678e31688e30698e306a8e2f6b8e2f6c8e2e6d8e2e6e8e2e6f8e2d708e2d718e2c718e2c728e2c738e2b748e2b758e2a768e2a778e2a788e29798e297a8e297b8e287c8e287d8e277e8e277f8e27808e26818e26828e26828e25838e25848e25858e24868e24878e23888e23898e238a8d228b8d228c8d228d8d218e8d218f8d21908d21918c20928c20928c20938c1f948c1f958b1f968b1f978b1f988b1f998a1f9a8a1e9b8a1e9c891e9d891f9e891f9f881fa0881fa1881fa1871fa28720a38620a48621a58521a68522a78522a88423a98324aa8325ab8225ac8226ad8127ad8128ae8029af7f2ab07f2cb17e2db27d2eb37c2fb47c31b57b32b67a34b67935b77937b87838b9773aba763bbb753dbc743fbc7340bd7242be7144bf7046c06f48c16e4ac16d4cc26c4ec36b50c46a52c56954c56856c66758c7655ac8645cc8635ec96260ca6063cb5f65cb5e67cc5c69cd5b6ccd5a6ece5870cf5773d05675d05477d1537ad1517cd2507fd34e81d34d84d44b86d54989d5488bd6468ed64590d74393d74195d84098d83e9bd93c9dd93ba0da39a2da37a5db36a8db34aadc32addc30b0dd2fb2dd2db5de2bb8de29bade28bddf26c0df25c2df23c5e021c8e020cae11fcde11dd0e11cd2e21bd5e21ad8e219dae319dde318dfe318e2e418e5e419e7e419eae51aece51befe51cf1e51df4e61ef6e620f8e621fbe723fde725")),ug=Sa(Gy("00000401000501010601010802010902020b02020d03030f03031204041405041606051806051a07061c08071e0907200a08220b09240c09260d0a290e0b2b100b2d110c2f120d31130d34140e36150e38160f3b180f3d19103f1a10421c10441d11471e114920114b21114e22115024125325125527125829115a2a115c2c115f2d11612f116331116533106734106936106b38106c390f6e3b0f703d0f713f0f72400f74420f75440f764510774710784910784a10794c117a4e117b4f127b51127c52137c54137d56147d57157e59157e5a167e5c167f5d177f5f187f601880621980641a80651a80671b80681c816a1c816b1d816d1d816e1e81701f81721f817320817521817621817822817922827b23827c23827e24828025828125818326818426818627818827818928818b29818c29818e2a81902a81912b81932b80942c80962c80982d80992d809b2e7f9c2e7f9e2f7fa02f7fa1307ea3307ea5317ea6317da8327daa337dab337cad347cae347bb0357bb2357bb3367ab5367ab73779b83779ba3878bc3978bd3977bf3a77c03a76c23b75c43c75c53c74c73d73c83e73ca3e72cc3f71cd4071cf4070d0416fd2426fd3436ed5446dd6456cd8456cd9466bdb476adc4869de4968df4a68e04c67e24d66e34e65e44f64e55064e75263e85362e95462ea5661eb5760ec5860ed5a5fee5b5eef5d5ef05f5ef1605df2625df2645cf3655cf4675cf4695cf56b5cf66c5cf66e5cf7705cf7725cf8745cf8765cf9785df9795df97b5dfa7d5efa7f5efa815ffb835ffb8560fb8761fc8961fc8a62fc8c63fc8e64fc9065fd9266fd9467fd9668fd9869fd9a6afd9b6bfe9d6cfe9f6dfea16efea36ffea571fea772fea973feaa74feac76feae77feb078feb27afeb47bfeb67cfeb77efeb97ffebb81febd82febf84fec185fec287fec488fec68afec88cfeca8dfe
[Vendored JavaScript bundle: a minified D3.js 4.x distribution (single-line UMD build exporting the global "d3"). The minified source is not reproduced here; it was mangled in extraction and is recoverable only from the upstream D3 release. Identifiable exports include the sequential color interpolators (viridis, inferno, magma, plasma, warm, cool, rainbow), shape and curve generators (d3.arc, d3.pie, d3.symbol, d3.line/d3.area with the curveBasis/curveBundle/curveCardinal/curveCatmullRom/curveMonotone/curveStep families), scales and number/time formatting, selections and transitions, drag/zoom/brush behaviors, force simulation, hierarchy layouts (cluster, tree, treemap, pack, partition, stratify), geographic projections (Albers USA, Mercator, orthographic, stereographic, transverse Mercator), and Voronoi tessellation.]
arguments.length?(i="function"==typeof t?t:sg(+t),n):i},n.y=function(t){return arguments.length?(o="function"==typeof t?t:sg(+t),n):o},n.context=function(t){return arguments.length?(u=null==t?null:t,n):u},n}function Va(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n=(n+r)/2,e,n,i,r,i)}function Wa(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n,e=(e+i)/2,r,e,r,i)}function Za(t,n,e,r,i){var o=Ag(n,e),u=Ag(n,e=(e+i)/2),a=Ag(r,e),c=Ag(r,i);t.moveTo(o[0],o[1]),t.bezierCurveTo(u[0],u[1],a[0],a[1],c[0],c[1])}function Ga(t,n,e){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+n)/6,(t._y0+4*t._y1+e)/6)}function Ja(t){this._context=t}function Qa(t){this._context=t}function Ka(t){this._context=t}function tc(t,n){this._basis=new Ja(t),this._beta=n}function nc(t,n,e){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-n),t._y2+t._k*(t._y1-e),t._x2,t._y2)}function ec(t,n){this._context=t,this._k=(1-n)/6}function rc(t,n){this._context=t,this._k=(1-n)/6}function ic(t,n){this._context=t,this._k=(1-n)/6}function oc(t,n,e){var r=t._x1,i=t._y1,o=t._x2,u=t._y2;if(t._l01_a>yg){var a=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,c=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*a-t._x0*t._l12_2a+t._x2*t._l01_2a)/c,i=(i*a-t._y0*t._l12_2a+t._y2*t._l01_2a)/c}if(t._l23_a>yg){var s=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,f=3*t._l23_a*(t._l23_a+t._l12_a);o=(o*s+t._x1*t._l23_2a-n*t._l12_2a)/f,u=(u*s+t._y1*t._l23_2a-e*t._l12_2a)/f}t._context.bezierCurveTo(r,i,o,u,t._x2,t._y2)}function uc(t,n){this._context=t,this._alpha=n}function ac(t,n){this._context=t,this._alpha=n}function cc(t,n){this._context=t,this._alpha=n}function sc(t){this._context=t}function fc(t){return t<0?-1:1}function lc(t,n,e){var r=t._x1-t._x0,i=n-t._x1,o=(t._y1-t._y0)/(r||i<0&&-0),u=(e-t._y1)/(i||r<0&&-0),a=(o*i+u*r)/(r+i);return(fc(o)+fc(u))*Math.min(Math.abs(o),Math.abs(u),.5*Math.abs(a))||0}function hc(t,n){var e=t._x1-t._x0;return e?(3*(t._y1-t._y0)/e-n)/2:n}function pc(t,n,e){var r=t._x0,i=t._y0,o=t._x1,u=t._y1,a=(o-r)/3;t._context.bezierCurveTo(r+a,i+a*n,o-a,u-a*e,o,u)}function dc(t){this._context=t}function vc(t){this._context=new _c(t)}function _c(t){this._context=t}function yc(t){this._context=t}function gc(t){var n,e,r=t.length-1,i=new Array(r),o=new Array(r),u=new Array(r);for(i[0]=0,o[0]=2,u[0]=t[0]+2*t[1],n=1;n=0;--n)i[n]=(u[n]-i[n+1])/o[n];for(o[r-1]=(t[r]+i[r-1])/2,n=0;n0)){if(o/=h,h<0){if(o0){if(o>l)return;o>f&&(f=o)}if(o=r-c,h||!(o<0)){if(o/=h,h<0){if(o>l)return;o>f&&(f=o)}else if(h>0){if(o0)){if(o/=p,p<0){if(o0){if(o>l)return;o>f&&(f=o)}if(o=i-s,p||!(o<0)){if(o/=p,p<0){if(o>l)return;o>f&&(f=o)}else if(p>0){if(o0||l<1)||(f>0&&(t[0]=[c+f*h,s+f*p]),l<1&&(t[1]=[c+l*h,s+l*p]),!0)}}}}}function Rc(t,n,e,r,i){var o=t[1];if(o)return!0;var u,a,c=t[0],s=t.left,f=t.right,l=s[0],h=s[1],p=f[0],d=f[1],v=(l+p)/2,_=(h+d)/2;if(d===h){if(v=r)return;if(l>p){if(c){if(c[1]>=i)return}else c=[v,e];o=[v,i]}else{if(c){if(c[1]1)if(l>p){if(c){if(c[1]>=i)return}else c=[(e-a)/u,e];o=[(i-a)/u,i]}else{if(c){if(c[1]=r)return}else c=[n,u*n+a];o=[r,u*r+a]}else{if(c){if(c[0]dm||Math.abs(i[0][1]-i[1][1])>dm)||delete lm[o]}function qc(t){return sm[t.index]={site:t,halfedges:[]}}function Uc(t,n){var e=t.site,r=n.left,i=n.right;return e===i&&(i=r,r=e),i?Math.atan2(i[1]-r[1],i[0]-r[0]):(e===r?(r=n[1],i=n[0]):(r=n[0],i=n[1]),Math.atan2(r[0]-i[0],i[1]-r[1]))}function Dc(t,n){return n[+(n.left!==t.site)]}function Oc(t,n){return n[+(n.left===t.site)]}function Fc(){for(var 
t,n,e,r,i=0,o=sm.length;idm||Math.abs(v-h)>dm)&&(c.splice(a,0,lm.push(Cc(u,p,Math.abs(d-t)dm?[t,Math.abs(l-t)dm?[Math.abs(h-r)dm?[e,Math.abs(l-e)dm?[Math.abs(h-n)=-vm)){var p=c*c+s*s,d=f*f+l*l,v=(l*p-s*d)/h,_=(c*d-f*p)/h,y=hm.pop()||new Yc;y.arc=t,y.site=i,y.x=v+u,y.y=(y.cy=_+a)+Math.sqrt(v*v+_*_),t.circle=y;for(var g=null,m=fm._;m;)if(y.ydm)a=a.L;else{if(!((i=o-Gc(a,u))>dm)){r>-dm?(n=a.P,e=a):i>-dm?(n=a,e=a.N):n=e=a;break}if(!a.R){n=a;break}a=a.R}qc(t);var c=Xc(t);if(cm.insert(n,c),n||e){if(n===e)return jc(n),e=Xc(n.site),cm.insert(c,e),c.edge=e.edge=Ac(n.site,c.site),Bc(n),void Bc(e);if(e){jc(n),jc(e);var s=n.site,f=s[0],l=s[1],h=t[0]-f,p=t[1]-l,d=e.site,v=d[0]-f,_=d[1]-l,y=2*(h*_-p*v),g=h*h+p*p,m=v*v+_*_,x=[(_*g-p*m)/y+f,(h*m-v*g)/y+l];zc(e.edge,s,d,x),c.edge=Ac(s,t,null,x),e.edge=Ac(t,d,null,x),Bc(n),Bc(e)}else c.edge=Ac(n.site,c.site)}}function Zc(t,n){var e=t.site,r=e[0],i=e[1],o=i-n;if(!o)return r;var u=t.P;if(!u)return-1/0;var a=(e=u.site)[0],c=e[1],s=c-n;if(!s)return a;var f=a-r,l=1/o-1/s,h=f/s;return l?(-h+Math.sqrt(h*h-2*l*(f*f/(-2*s)-c+s/2+i-o/2)))/l+r:(r+a)/2}function Gc(t,n){var e=t.N;if(e)return Zc(e,n);var r=t.site;return r[1]===n?r[0]:1/0}function Jc(t,n,e){return(t[0]-e[0])*(n[1]-t[1])-(t[0]-n[0])*(e[1]-t[1])}function Qc(t,n){return n[1]-t[1]||n[0]-t[0]}function Kc(t,n){var e,r,i,o=t.sort(Qc).pop();for(lm=[],sm=new Array(t.length),cm=new Tc,fm=new Tc;;)if(i=am,o&&(!i||o[1]n?1:t>=n?0:NaN},fs=function(t){return 1===t.length&&(t=n(t)),{left:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r>>1;t(n[o],e)<0?r=o+1:i=o}return r},right:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r>>1;t(n[o],e)>0?i=o:r=o+1}return r}}},ls=fs(ss),hs=ls.right,ps=ls.left,ds=function(t){return null===t?NaN:+t},vs=function(t,n){var e,r,i=t.length,o=0,u=-1,a=0,c=0;if(null==n)for(;++u1)return c/(o-1)},_s=function(t,n){var e=vs(t,n);return e?Math.sqrt(e):e},ys=function(t,n){var e,r,i,o=t.length,u=-1;if(null==n){for(;++u=e)for(r=i=e;++ue&&(r=e),i=e)for(r=i=e;++ue&&(r=e),i0)for(t=Math.ceil(t/u),n=Math.floor(n/u),o=new Array(i=Math.ceil(n-t+1));++c=1)return+e(t[r-1],r-1,t);var r,i=(r-1)*n,o=Math.floor(i),u=+e(t[o],o,t);return u+(+e(t[o+1],o+1,t)-u)*(i-o)}},Cs=function(t){for(var n,e,r,i=t.length,o=-1,u=0;++o=0;)for(n=(r=t[i]).length;--n>=0;)e[--u]=r[n];return e},zs=function(t,n){var e,r,i=t.length,o=-1;if(null==n){for(;++o=e)for(r=e;++oe&&(r=e)}else for(;++o=e)for(r=e;++oe&&(r=e);return r},Ps=function(t){if(!(i=t.length))return[];for(var n=-1,e=zs(t,o),r=new Array(e);++n0)for(var e,r,i=new Array(e),o=0;o=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),Bs.hasOwnProperty(n)?{space:Bs[n],local:t}:t},Hs=function(t){var n=js(t);return(n.local?g:y)(n)},Xs=0;x.prototype=m.prototype={constructor:x,get:function(t){for(var n=this._;!(n in t);)if(!(t=t.parentNode))return;return t[n]},set:function(t,n){return t[this._]=n},remove:function(t){return this._ in t&&delete t[this._]},toString:function(){return this._}};var $s=function(t){return function(){return this.matches(t)}};if("undefined"!=typeof document){var Vs=document.documentElement;if(!Vs.matches){var Ws=Vs.webkitMatchesSelector||Vs.msMatchesSelector||Vs.mozMatchesSelector||Vs.oMatchesSelector;$s=function(t){return function(){return Ws.call(this,t)}}}}var Zs=$s,Gs={};t.event=null,"undefined"!=typeof document&&("onmouseenter"in document.documentElement||(Gs={mouseenter:"mouseover",mouseleave:"mouseout"}));var Js=function(){for(var n,e=t.event;n=e.sourceEvent;)e=n;return e},Qs=function(t,n){var e=t.ownerSVGElement||t;if(e.createSVGPoint){var 
r=e.createSVGPoint();return r.x=n.clientX,r.y=n.clientY,r=r.matrixTransform(t.getScreenCTM().inverse()),[r.x,r.y]}var i=t.getBoundingClientRect();return[n.clientX-i.left-t.clientLeft,n.clientY-i.top-t.clientTop]},Ks=function(t){var n=Js();return n.changedTouches&&(n=n.changedTouches[0]),Qs(t,n)},tf=function(t){return null==t?S:function(){return this.querySelector(t)}},nf=function(t){return null==t?E:function(){return this.querySelectorAll(t)}},ef=function(t){return new Array(t.length)};A.prototype={constructor:A,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var rf=function(t){return function(){return t}},of="$",uf=function(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView};W.prototype={add:function(t){this._names.indexOf(t)<0&&(this._names.push(t),this._node.setAttribute("class",this._names.join(" ")))},remove:function(t){var n=this._names.indexOf(t);n>=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var af=[null];pt.prototype=dt.prototype={constructor:pt,select:function(t){"function"!=typeof t&&(t=tf(t));for(var n=this._groups,e=n.length,r=new Array(e),i=0;i=x&&(x=m+1);!(g=_[x])&&++x=0;)(r=i[o])&&(u&&u!==r.nextSibling&&u.parentNode.insertBefore(r,u),u=r);return this},sort:function(t){t||(t=P);for(var n=this._groups,e=n.length,r=new Array(e),i=0;i1?this.each((null==n?F:"function"==typeof n?Y:I)(t,n,null==e?"":e)):B(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?j:"function"==typeof n?X:H)(t,n)):this.node()[t]},classed:function(t,n){var e=$(t+"");if(arguments.length<2){for(var r=V(this.node()),i=-1,o=e.length;++i=240?t-240:t+120,i,r),Lt(t,i,r),Lt(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1}}));var Nf=Math.PI/180,Sf=180/Math.PI,Ef=.95047,Af=1,Cf=1.08883,zf=4/29,Pf=6/29,Rf=3*Pf*Pf,Lf=Pf*Pf*Pf;pf(Dt,Ut,wt(Mt,{brighter:function(t){return new Dt(this.l+18*(null==t?1:t),this.a,this.b,this.opacity)},darker:function(t){return new Dt(this.l-18*(null==t?1:t),this.a,this.b,this.opacity)},rgb:function(){var t=(this.l+16)/116,n=isNaN(this.a)?t:t+this.a/500,e=isNaN(this.b)?t:t-this.b/200;return t=Af*Ft(t),n=Ef*Ft(n),e=Cf*Ft(e),new At(It(3.2404542*n-1.5371385*t-.4985314*e),It(-.969266*n+1.8760108*t+.041556*e),It(.0556434*n-.2040259*t+1.0572252*e),this.opacity)}})),pf(Ht,jt,wt(Mt,{brighter:function(t){return new Ht(this.h,this.c,this.l+18*(null==t?1:t),this.opacity)},darker:function(t){return new Ht(this.h,this.c,this.l-18*(null==t?1:t),this.opacity)},rgb:function(){return qt(this).rgb()}}));var qf=-.14861,Uf=1.78277,Df=-.29227,Of=-.90649,Ff=1.97294,If=Ff*Of,Yf=Ff*Uf,Bf=Uf*Df-Of*qf;pf(Vt,$t,wt(Mt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new Vt(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new Vt(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=isNaN(this.h)?0:(this.h+120)*Nf,n=+this.l,e=isNaN(this.s)?0:this.s*n*(1-n),r=Math.cos(t),i=Math.sin(t);return new At(255*(n+e*(qf*r+Uf*i)),255*(n+e*(Df*r+Of*i)),255*(n+e*(Ff*r)),this.opacity)}}));var jf,Hf,Xf,$f,Vf,Wf,Zf=function(t){var n=t.length-1;return function(e){var 
r=e<=0?e=0:e>=1?(e=1,n-1):Math.floor(e*n),i=t[r],o=t[r+1],u=r>0?t[r-1]:2*i-o,a=ro&&(i=n.slice(o,i),a[u]?a[u]+=i:a[++u]=i),(e=e[0])===(r=r[0])?a[u]?a[u]+=r:a[++u]=r:(a[++u]=null,c.push({i:u,x:rl(e,r)})),o=ul.lastIndex;return oDl&&e.state1e-6)if(Math.abs(f*a-c*s)>1e-6&&i){var h=e-o,p=r-u,d=a*a+c*c,v=h*h+p*p,_=Math.sqrt(d),y=Math.sqrt(l),g=i*Math.tan((Yh-Math.acos((d+l-v)/(2*_*y)))/2),m=g/y,x=g/_;Math.abs(m-1)>1e-6&&(this._+="L"+(t+m*s)+","+(n+m*f)),this._+="A"+i+","+i+",0,0,"+ +(f*h>s*p)+","+(this._x1=t+x*a)+","+(this._y1=n+x*c)}else this._+="L"+(this._x1=t)+","+(this._y1=n);else;},arc:function(t,n,e,r,i,o){t=+t,n=+n;var u=(e=+e)*Math.cos(r),a=e*Math.sin(r),c=t+u,s=n+a,f=1^o,l=o?r-i:i-r;if(e<0)throw new Error("negative radius: "+e);null===this._x1?this._+="M"+c+","+s:(Math.abs(this._x1-c)>1e-6||Math.abs(this._y1-s)>1e-6)&&(this._+="L"+c+","+s),e&&(l<0&&(l=l%Bh+Bh),l>jh?this._+="A"+e+","+e+",0,1,"+f+","+(t-u)+","+(n-a)+"A"+e+","+e+",0,1,"+f+","+(this._x1=c)+","+(this._y1=s):l>1e-6&&(this._+="A"+e+","+e+",0,"+ +(l>=Yh)+","+f+","+(this._x1=t+e*Math.cos(i))+","+(this._y1=n+e*Math.sin(i))))},rect:function(t,n,e,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+n)+"h"+ +e+"v"+ +r+"h"+-e+"Z"},toString:function(){return this._}};be.prototype=we.prototype={constructor:be,has:function(t){return"$"+t in this},get:function(t){return this["$"+t]},set:function(t,n){return this["$"+t]=n,this},remove:function(t){var n="$"+t;return n in this&&delete this[n]},clear:function(){for(var t in this)"$"===t[0]&&delete this[t]},keys:function(){var t=[];for(var n in this)"$"===n[0]&&t.push(n.slice(1));return t},values:function(){var t=[];for(var n in this)"$"===n[0]&&t.push(this[n]);return t},entries:function(){var t=[];for(var n in this)"$"===n[0]&&t.push({key:n.slice(1),value:this[n]});return t},size:function(){var t=0;for(var n in this)"$"===n[0]&&++t;return t},empty:function(){for(var t in this)if("$"===t[0])return!1;return!0},each:function(t){for(var n in this)"$"===n[0]&&t(this[n],n.slice(1),this)}};var Hh=we.prototype;Se.prototype=Ee.prototype={constructor:Se,has:Hh.has,add:function(t){return t+="",this["$"+t]=t,this},remove:Hh.remove,clear:Hh.clear,values:Hh.keys,size:Hh.size,empty:Hh.empty,each:Hh.each};var Xh={},$h={},Vh=34,Wh=10,Zh=13,Gh=function(t){function n(t,n){function e(){if(s)return $h;if(f)return f=!1,Xh;var n,e,r=a;if(t.charCodeAt(r)===Vh){for(;a++=u?s=!0:(e=t.charCodeAt(a++))===Wh?f=!0:e===Zh&&(f=!0,t.charCodeAt(a)===Wh&&++a),t.slice(r+1,n-1).replace(/""/g,'"')}for(;af&&(f=r),il&&(l=i));for(ft||t>i||r>n||n>o))return this;var u,a,c=i-e,s=this._root;switch(a=(n<(r+o)/2)<<1|t<(e+i)/2){case 0:do{u=new Array(4),u[a]=s,s=u}while(c*=2,i=e+c,o=r+c,t>i||n>o);break;case 1:do{u=new Array(4),u[a]=s,s=u}while(c*=2,e=i-c,o=r+c,e>t||n>o);break;case 2:do{u=new Array(4),u[a]=s,s=u}while(c*=2,i=e+c,r=o-c,t>i||r>n);break;case 3:do{u=new Array(4),u[a]=s,s=u}while(c*=2,e=i-c,r=o-c,e>t||r>n)}this._root&&this._root.length&&(this._root=s)}return this._x0=e,this._y0=r,this._x1=i,this._y1=o,this},fp.data=function(){var t=[];return this.visit(function(n){if(!n.length)do{t.push(n.data)}while(n=n.next)}),t},fp.extent=function(t){return arguments.length?this.cover(+t[0][0],+t[0][1]).cover(+t[1][0],+t[1][1]):isNaN(this._x0)?void 0:[[this._x0,this._y0],[this._x1,this._y1]]},fp.find=function(t,n,e){var r,i,o,u,a,c,s,f=this._x0,l=this._y0,h=this._x1,p=this._y1,d=[],v=this._root;for(v&&d.push(new 
sp(v,f,l,h,p)),null==e?e=1/0:(f=t-e,l=n-e,h=t+e,p=n+e,e*=e);c=d.pop();)if(!(!(v=c.node)||(i=c.x0)>h||(o=c.y0)>p||(u=c.x1)=y)<<1|t>=_)&&(c=d[d.length-1],d[d.length-1]=d[d.length-1-s],d[d.length-1-s]=c)}else{var g=t-+this._x.call(null,v.data),m=n-+this._y.call(null,v.data),x=g*g+m*m;if(x=(a=(d+_)/2))?d=a:_=a,(f=u>=(c=(v+y)/2))?v=c:y=c,n=p,!(p=p[l=f<<1|s]))return this;if(!p.length)break;(n[l+1&3]||n[l+2&3]||n[l+3&3])&&(e=n,h=l)}for(;p.data!==t;)if(r=p,!(p=p.next))return this;return(i=p.next)&&delete p.next,r?(i?r.next=i:delete r.next,this):n?(i?n[l]=i:delete n[l],(p=n[0]||n[1]||n[2]||n[3])&&p===(n[3]||n[2]||n[1]||n[0])&&!p.length&&(e?e[h]=p:this._root=p),this):(this._root=i,this)},fp.removeAll=function(t){for(var n=0,e=t.length;n1?r[0]+r.slice(2):r,+t.slice(e+1)]},vp=function(t){return(t=dp(Math.abs(t)))?t[1]:NaN},_p=function(t,n){return function(e,r){for(var i=e.length,o=[],u=0,a=t[0],c=0;i>0&&a>0&&(c+a+1>r&&(a=Math.max(1,r-c)),o.push(e.substring(i-=a,i+a)),!((c+=a+1)>r));)a=t[u=(u+1)%t.length];return o.reverse().join(n)}},yp=function(t){return function(n){return n.replace(/[0-9]/g,function(n){return t[+n]})}},gp=function(t,n){var e=dp(t,n);if(!e)return t+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")},mp={"":function(t,n){t:for(var e,r=(t=t.toPrecision(n)).length,i=1,o=-1;i0&&(o=0)}return o>0?t.slice(0,o)+t.slice(e+1):t},"%":function(t,n){return(100*t).toFixed(n)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.round(t).toString(10)},e:function(t,n){return t.toExponential(n)},f:function(t,n){return t.toFixed(n)},g:function(t,n){return t.toPrecision(n)},o:function(t){return Math.round(t).toString(8)},p:function(t,n){return gp(100*t,n)},r:gp,s:function(t,n){var e=dp(t,n);if(!e)return t+"";var r=e[0],i=e[1],o=i-(lp=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,u=r.length;return o===u?r:o>u?r+new Array(o-u+1).join("0"):o>0?r.slice(0,o)+"."+r.slice(o):"0."+new Array(1-o).join("0")+dp(t,Math.max(0,n+o-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}},xp=/^(?:(.)?([<>=^]))?([+\-\( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?([a-z%])?$/i;He.prototype=Xe.prototype,Xe.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(null==this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(null==this.precision?"":"."+Math.max(0,0|this.precision))+this.type};var bp,wp=function(t){return t},Mp=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"],Tp=function(t){function n(t){function n(t){var n,r,u,f=_,x=y;if("c"===v)x=g(t)+x,t="";else{var b=(t=+t)<0;if(t=g(Math.abs(t),d),b&&0==+t&&(b=!1),f=(b?"("===s?s:"-":"-"===s||"("===s?"":s)+f,x=x+("s"===v?Mp[8+lp/3]:"")+(b&&"("===s?")":""),m)for(n=-1,r=t.length;++n(u=t.charCodeAt(n))||u>57){x=(46===u?i+t.slice(n+1):t.slice(n))+x,t=t.slice(0,n);break}}p&&!l&&(t=e(t,1/0));var w=f.length+t.length+x.length,M=w>1)+f+t+x+M.slice(w);break;default:t=M+f+t+x}return o(t)}var a=(t=He(t)).fill,c=t.align,s=t.sign,f=t.symbol,l=t.zero,h=t.width,p=t.comma,d=t.precision,v=t.type,_="$"===f?r[0]:"#"===f&&/[boxX]/.test(v)?"0"+v.toLowerCase():"",y="$"===f?r[1]:/[%p]/.test(v)?u:"",g=mp[v],m=!v||/[defgprs%]/.test(v);return d=null==d?v?6:12:/[gprs]/.test(v)?Math.max(1,Math.min(21,d)):Math.max(0,Math.min(20,d)),n.toString=function(){return t+""},n}var 
e=t.grouping&&t.thousands?_p(t.grouping,t.thousands):wp,r=t.currency,i=t.decimal,o=t.numerals?yp(t.numerals):wp,u=t.percent||"%";return{format:n,formatPrefix:function(t,e){var r=n((t=He(t),t.type="f",t)),i=3*Math.max(-8,Math.min(8,Math.floor(vp(e)/3))),o=Math.pow(10,-i),u=Mp[8+i/3];return function(t){return r(o*t)+u}}}};$e({decimal:".",thousands:",",grouping:[3],currency:["$",""]});var kp=function(t){return Math.max(0,-vp(Math.abs(t)))},Np=function(t,n){return Math.max(0,3*Math.max(-8,Math.min(8,Math.floor(vp(n)/3)))-vp(Math.abs(t)))},Sp=function(t,n){return t=Math.abs(t),n=Math.abs(n)-t,Math.max(0,vp(n)-vp(t))+1},Ep=function(){return new Ve};Ve.prototype={constructor:Ve,reset:function(){this.s=this.t=0},add:function(t){We(ud,t,this.t),We(this,ud.s,this.s),this.s?this.t+=ud.t:this.s=ud.t},valueOf:function(){return this.s}};var Ap,Cp,zp,Pp,Rp,Lp,qp,Up,Dp,Op,Fp,Ip,Yp,Bp,jp,Hp,Xp,$p,Vp,Wp,Zp,Gp,Jp,Qp,Kp,td,nd,ed,rd,id,od,ud=new Ve,ad=1e-6,cd=Math.PI,sd=cd/2,fd=cd/4,ld=2*cd,hd=180/cd,pd=cd/180,dd=Math.abs,vd=Math.atan,_d=Math.atan2,yd=Math.cos,gd=Math.ceil,md=Math.exp,xd=Math.log,bd=Math.pow,wd=Math.sin,Md=Math.sign||function(t){return t>0?1:t<0?-1:0},Td=Math.sqrt,kd=Math.tan,Nd={Feature:function(t,n){Ke(t.geometry,n)},FeatureCollection:function(t,n){for(var e=t.features,r=-1,i=e.length;++rad?Dp=90:Pd<-ad&&(qp=-90),jp[0]=Lp,jp[1]=Up}},Ld={sphere:Qe,point:Mr,lineStart:kr,lineEnd:Er,polygonStart:function(){Ld.lineStart=Ar,Ld.lineEnd=Cr},polygonEnd:function(){Ld.lineStart=kr,Ld.lineEnd=Er}},qd=function(t){return function(){return t}},Ud=function(t,n){function e(e,r){return e=t(e,r),n(e[0],e[1])}return t.invert&&n.invert&&(e.invert=function(e,r){return(e=n.invert(e,r))&&t.invert(e[0],e[1])}),e};Rr.invert=Rr;var Dd,Od,Fd,Id,Yd,Bd,jd,Hd,Xd,$d,Vd,Wd=function(t){function n(n){return n=t(n[0]*pd,n[1]*pd),n[0]*=hd,n[1]*=hd,n}return t=Lr(t[0]*pd,t[1]*pd,t.length>2?t[2]*pd:0),n.invert=function(n){return n=t.invert(n[0]*pd,n[1]*pd),n[0]*=hd,n[1]*=hd,n},n},Zd=function(){var t,n=[];return{point:function(n,e){t.push([n,e])},lineStart:function(){n.push(t=[])},lineEnd:Qe,rejoin:function(){n.length>1&&n.push(n.pop().concat(n.shift()))},result:function(){var e=n;return n=[],t=null,e}}},Gd=function(t,n,e,r,i,o){var u,a=t[0],c=t[1],s=0,f=1,l=n[0]-a,h=n[1]-c;if(u=e-a,l||!(u>0)){if(u/=l,l<0){if(u0){if(u>f)return;u>s&&(s=u)}if(u=i-a,l||!(u<0)){if(u/=l,l<0){if(u>f)return;u>s&&(s=u)}else if(l>0){if(u0)){if(u/=h,h<0){if(u0){if(u>f)return;u>s&&(s=u)}if(u=o-c,h||!(u<0)){if(u/=h,h<0){if(u>f)return;u>s&&(s=u)}else if(h>0){if(u0&&(t[0]=a+s*l,t[1]=c+s*h),f<1&&(n[0]=a+f*l,n[1]=c+f*h),!0}}}}},Jd=function(t,n){return dd(t[0]-n[0])=0;--o)i.point((f=s[o])[0],f[1]);else r(h.x,h.p.x,-1,i);h=h.p}s=(h=h.o).z,p=!p}while(!h.v);i.lineEnd()}}},Kd=1e9,tv=-Kd,nv=Ep(),ev=function(t,n){var e=n[0],r=n[1],i=[wd(e),-yd(e),0],o=0,u=0;nv.reset();for(var a=0,c=t.length;a=0?1:-1,T=M*w,k=T>cd,N=d*x;if(nv.add(_d(N*M*wd(T),v*b+N*yd(T))),o+=k?w+M*ld:w,k^h>=e^g>=e){var S=sr(ar(l),ar(y));hr(S);var E=sr(i,S);hr(E);var A=(k^w>=0?-1:1)*Ge(E[2]);(r>A||r===A&&(S[0]||S[1]))&&(u+=k^w>=0?1:-1)}}return(o<-ad||oyv&&(yv=t),n<_v&&(_v=n),n>gv&&(gv=n)},lineStart:Qe,lineEnd:Qe,polygonStart:Qe,polygonEnd:Qe,result:function(){var t=[[vv,_v],[yv,gv]];return yv=gv=-(_v=vv=1/0),t}},xv=0,bv=0,wv=0,Mv=0,Tv=0,kv=0,Nv=0,Sv=0,Ev=0,Av={point:oi,lineStart:ui,lineEnd:si,polygonStart:function(){Av.lineStart=fi,Av.lineEnd=li},polygonEnd:function(){Av.point=oi,Av.lineStart=ui,Av.lineEnd=si},result:function(){var t=Ev?[Nv/Ev,Sv/Ev]:kv?[Mv/kv,Tv/kv]:wv?[xv/wv,bv/wv]:[NaN,NaN];return 
xv=bv=wv=Mv=Tv=kv=Nv=Sv=Ev=0,t}};di.prototype={_radius:4.5,pointRadius:function(t){return this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,n){switch(this._point){case 0:this._context.moveTo(t,n),this._point=1;break;case 1:this._context.lineTo(t,n);break;default:this._context.moveTo(t+this._radius,n),this._context.arc(t,n,this._radius,0,ld)}},result:Qe};var Cv,zv,Pv,Rv,Lv,qv=Ep(),Uv={point:Qe,lineStart:function(){Uv.point=vi},lineEnd:function(){Cv&&_i(zv,Pv),Uv.point=Qe},polygonStart:function(){Cv=!0},polygonEnd:function(){Cv=null},result:function(){var t=+qv;return qv.reset(),t}};yi.prototype={_radius:4.5,_circle:gi(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,n){switch(this._point){case 0:this._string.push("M",t,",",n),this._point=1;break;case 1:this._string.push("L",t,",",n);break;default:null==this._circle&&(this._circle=gi(this._radius)),this._string.push("M",t,",",n,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}};var Dv=function(t,n,e,r){return function(i,o){function u(n,e){var r=i(n,e);t(n=r[0],e=r[1])&&o.point(n,e)}function a(t,n){var e=i(t,n);_.point(e[0],e[1])}function c(){b.point=a,_.lineStart()}function s(){b.point=u,_.lineEnd()}function f(t,n){v.push([t,n]);var e=i(t,n);m.point(e[0],e[1])}function l(){m.lineStart(),v=[]}function h(){f(v[0][0],v[0][1]),m.lineEnd();var t,n,e,r,i=m.clean(),u=g.result(),a=u.length;if(v.pop(),p.push(v),v=null,a)if(1&i){if(e=u[0],(n=e.length-1)>0){for(x||(o.polygonStart(),x=!0),o.lineStart(),t=0;t1&&2&i&&u.push(u.pop().concat(u.shift())),d.push(u.filter(mi))}var p,d,v,_=n(o),y=i.invert(r[0],r[1]),g=Zd(),m=n(g),x=!1,b={point:u,lineStart:c,lineEnd:s,polygonStart:function(){b.point=f,b.lineStart=l,b.lineEnd=h,d=[],p=[]},polygonEnd:function(){b.point=u,b.lineStart=c,b.lineEnd=s,d=Cs(d);var t=ev(p,y);d.length?(x||(o.polygonStart(),x=!0),Qd(d,xi,t,e,o)):t&&(x||(o.polygonStart(),x=!0),o.lineStart(),e(null,null,1,o),o.lineEnd()),x&&(o.polygonEnd(),x=!1),d=p=null},sphere:function(){o.polygonStart(),o.lineStart(),e(null,null,1,o),o.lineEnd(),o.polygonEnd()}};return b}},Ov=Dv(function(){return!0},function(t){var n,e=NaN,r=NaN,i=NaN;return{lineStart:function(){t.lineStart(),n=1},point:function(o,u){var a=o>0?cd:-cd,c=dd(o-e);dd(c-cd)0?sd:-sd),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(a,r),t.point(o,r),n=0):i!==a&&c>=cd&&(dd(e-i)ad){var o=t[0]o}function r(t,n,e){var r=[1,0,0],i=sr(ar(t),ar(n)),u=cr(i,i),a=i[0],c=u-a*a;if(!c)return!e&&t;var s=o*u/c,f=-o*a/c,l=sr(r,i),h=lr(r,s);fr(h,lr(i,f));var p=l,d=cr(h,p),v=cr(p,p),_=d*d-v*(cr(h,h)-1);if(!(_<0)){var y=Td(_),g=lr(p,(-d-y)/v);if(fr(g,h),g=ur(g),!e)return g;var m,x=t[0],b=n[0],w=t[1],M=n[1];b0^g[1]<(dd(g[0]-x)cd^(x<=g[0]&&g[0]<=b)){var S=lr(p,(-d+y)/v);return fr(S,h),[g,ur(S)]}}}function i(n,e){var r=u?t:cd-t,i=0;return n<-r?i|=1:n>r&&(i|=2),e<-r?i|=4:e>r&&(i|=8),i}var o=yd(t),u=o>0,a=dd(o)>ad;return Dv(e,function(t){var n,o,c,s,f;return{lineStart:function(){s=c=!1,f=1},point:function(l,h){var 
p,d=[l,h],v=e(l,h),_=u?v?0:i(l,h):v?i(l+(l<0?cd:-cd),h):0;if(!n&&(s=c=v)&&t.lineStart(),v!==c&&(!(p=r(n,d))||Jd(n,p)||Jd(d,p))&&(d[0]+=ad,d[1]+=ad,v=e(d[0],d[1])),v!==c)f=0,v?(t.lineStart(),p=r(d,n),t.point(p[0],p[1])):(p=r(n,d),t.point(p[0],p[1]),t.lineEnd()),n=p;else if(a&&n&&u^v){var y;_&o||!(y=r(d,n,!0))||(f=0,u?(t.lineStart(),t.point(y[0][0],y[0][1]),t.point(y[1][0],y[1][1]),t.lineEnd()):(t.point(y[1][0],y[1][1]),t.lineEnd(),t.lineStart(),t.point(y[0][0],y[0][1])))}!v||n&&Jd(n,d)||t.point(d[0],d[1]),n=d,c=v,o=_},lineEnd:function(){c&&t.lineEnd(),n=null},clean:function(){return f|(s&&c)<<1}}},function(e,r,i,o){Or(o,t,n,i,e,r)},u?[0,-t]:[-cd,t-cd])};Mi.prototype={constructor:Mi,point:function(t,n){this.stream.point(t,n)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var Iv=16,Yv=yd(30*pd),Bv=function(t,n){return+n?Si(t,n):Ni(t)},jv=wi({point:function(t,n){this.stream.point(t*pd,n*pd)}}),Hv=function(){return Ci(Pi).scale(155.424).center([0,33.6442])},Xv=function(){return Hv().parallels([29.5,45.5]).scale(1070).translate([480,250]).rotate([96,0]).center([-.6,38.7])},$v=Li(function(t){return Td(2/(1+t))});$v.invert=qi(function(t){return 2*Ge(t/2)});var Vv=Li(function(t){return(t=Ze(t))&&t/wd(t)});Vv.invert=qi(function(t){return t});Ui.invert=function(t,n){return[t,2*vd(md(n))-sd]};Ii.invert=Ii;Bi.invert=qi(vd);Hi.invert=qi(Ge);Xi.invert=qi(function(t){return 2*vd(t)});$i.invert=function(t,n){return[-n,2*vd(md(t))-sd]};uo.prototype=eo.prototype={constructor:uo,count:function(){return this.eachAfter(to)},each:function(t){var n,e,r,i,o=this,u=[o];do{for(n=u.reverse(),u=[];o=n.pop();)if(t(o),e=o.children)for(r=0,i=e.length;r=0;--e)i.push(n[e]);return this},sum:function(t){return this.eachAfter(function(n){for(var e=+t(n.data)||0,r=n.children,i=r&&r.length;--i>=0;)e+=r[i].value;n.value=e})},sort:function(t){return this.eachBefore(function(n){n.children&&n.children.sort(t)})},path:function(t){for(var n=this,e=no(n,t),r=[n];n!==e;)n=n.parent,r.push(n);for(var i=r.length;t!==e;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,n=[t];t=t.parent;)n.push(t);return n},descendants:function(){var t=[];return this.each(function(n){t.push(n)}),t},leaves:function(){var t=[];return this.eachBefore(function(n){n.children||t.push(n)}),t},links:function(){var t=this,n=[];return t.each(function(e){e!==t&&n.push({source:e.parent,target:e})}),n},copy:function(){return eo(this).eachBefore(io)}};var Wv=Array.prototype.slice,Zv=function(t){for(var n,e,r=0,i=(t=ao(Wv.call(t))).length,o=[];r1?n:1)},e}(r_),o_=function t(n){function e(t,e,r,i,o){if((u=t._squarify)&&u.ratio===n)for(var u,a,c,s,f,l=-1,h=u.length,p=t.value;++l1?n:1)},e}(r_),u_=function(t,n,e){return(n[0]-t[0])*(e[1]-t[1])-(n[1]-t[1])*(e[0]-t[0])},a_=[].slice,c_={};Bo.prototype=Wo.prototype={constructor:Bo,defer:function(t){if("function"!=typeof t)throw new Error("invalid callback");if(this._call)throw new Error("defer after await");if(null!=this._error)return this;var n=a_.call(arguments,1);return n.push(t),++this._waiting,this._tasks.push(n),jo(this),this},abort:function(){return null==this._error&&$o(this,new Error("abort")),this},await:function(t){if("function"!=typeof t)throw new Error("invalid callback");if(this._call)throw new Error("multiple await");return 
this._call=function(n,e){t.apply(null,[n].concat(e))},Vo(this),this},awaitAll:function(t){if("function"!=typeof t)throw new Error("invalid callback");if(this._call)throw new Error("multiple await");return this._call=t,Vo(this),this}};var s_=function(){return Math.random()},f_=function t(n){function e(t,e){return t=null==t?0:+t,e=null==e?1:+e,1===arguments.length?(e=t,t=0):e-=t,function(){return n()*e+t}}return e.source=t,e}(s_),l_=function t(n){function e(t,e){var r,i;return t=null==t?0:+t,e=null==e?1:+e,function(){var o;if(null!=r)o=r,r=null;else do{r=2*n()-1,o=2*n()-1,i=r*r+o*o}while(!i||i>1);return t+e*o*Math.sqrt(-2*Math.log(i)/i)}}return e.source=t,e}(s_),h_=function t(n){function e(){var t=l_.source(n).apply(this,arguments);return function(){return Math.exp(t())}}return e.source=t,e}(s_),p_=function t(n){function e(t){return function(){for(var e=0,r=0;r=200&&e<300||304===e){if(o)try{n=o.call(r,s)}catch(t){return void a.call("error",r,t)}else n=s;a.call("load",r,n)}else a.call("error",r,t)}var r,i,o,u,a=h("beforesend","progress","load","error"),c=we(),s=new XMLHttpRequest,f=null,l=null,p=0;if("undefined"==typeof XDomainRequest||"withCredentials"in s||!/^(http(s)?:)?\/\//.test(t)||(s=new XDomainRequest),"onload"in s?s.onload=s.onerror=s.ontimeout=e:s.onreadystatechange=function(t){s.readyState>3&&e(t)},s.onprogress=function(t){a.call("progress",r,t)},r={header:function(t,n){return t=(t+"").toLowerCase(),arguments.length<2?c.get(t):(null==n?c.remove(t):c.set(t,n+""),r)},mimeType:function(t){return arguments.length?(i=null==t?null:t+"",r):i},responseType:function(t){return arguments.length?(u=t,r):u},timeout:function(t){return arguments.length?(p=+t,r):p},user:function(t){return arguments.length<1?f:(f=null==t?null:t+"",r)},password:function(t){return arguments.length<1?l:(l=null==t?null:t+"",r)},response:function(t){return o=t,r},get:function(t,n){return r.send("GET",t,n)},post:function(t,n){return r.send("POST",t,n)},send:function(n,e,o){return s.open(n,t,!0,f,l),null==i||c.has("accept")||c.set("accept",i+",*/*"),s.setRequestHeader&&c.each(function(t,n){s.setRequestHeader(n,t)}),null!=i&&s.overrideMimeType&&s.overrideMimeType(i),null!=u&&(s.responseType=u),p>0&&(s.timeout=p),null==o&&"function"==typeof e&&(o=e,e=null),null!=o&&1===o.length&&(o=Zo(o)),null!=o&&r.on("error",o).on("load",function(t){o(null,t)}),a.call("beforesend",r,s),s.send(null==e?null:e),r},abort:function(){return s.abort(),r},on:function(){var t=a.on.apply(a,arguments);return t===a?r:t}},null!=n){if("function"!=typeof n)throw new Error("invalid callback: "+n);return r.get(n)}return r},y_=function(t,n){return function(e,r){var i=__(e).mimeType(t).response(n);if(null!=r){if("function"!=typeof r)throw new Error("invalid callback: "+r);return i.get(r)}return i}},g_=y_("text/html",function(t){return document.createRange().createContextualFragment(t.responseText)}),m_=y_("application/json",function(t){return JSON.parse(t.responseText)}),x_=y_("text/plain",function(t){return t.responseText}),b_=y_("application/xml",function(t){var n=t.responseXML;if(!n)throw new Error("parse error");return n}),w_=function(t,n){return function(e,r,i){arguments.length<3&&(i=r,r=null);var o=__(e).mimeType(t);return o.row=function(t){return arguments.length?o.response(Jo(n,r=t)):r},o.row(r),i?o.get(i):o}},M_=w_("text/csv",Qh),T_=w_("text/tab-separated-values",rp),k_=Array.prototype,N_=k_.map,S_=k_.slice,E_={name:"implicit"},A_=function(t){return function(){return t}},C_=function(t){return+t},z_=[0,1],P_=function(n,e,r){var 
o,u=n[0],a=n[n.length-1],c=i(u,a,null==e?10:e);switch((r=He(null==r?",f":r)).type){case"s":var s=Math.max(Math.abs(u),Math.abs(a));return null!=r.precision||isNaN(o=Np(c,s))||(r.precision=o),t.formatPrefix(r,s);case"":case"e":case"g":case"p":case"r":null!=r.precision||isNaN(o=Sp(c,Math.max(Math.abs(u),Math.abs(a))))||(r.precision=o-("e"===r.type));break;case"f":case"%":null!=r.precision||isNaN(o=kp(c))||(r.precision=o-2*("%"===r.type))}return t.format(r)},R_=function(t,n){var e,r=0,i=(t=t.slice()).length-1,o=t[r],u=t[i];return u0?t>1?Mu(function(n){n.setTime(Math.floor(n/t)*t)},function(n,e){n.setTime(+n+e*t)},function(n,e){return(e-n)/t}):U_:null};var D_=U_.range,O_=6e4,F_=6048e5,I_=Mu(function(t){t.setTime(1e3*Math.floor(t/1e3))},function(t,n){t.setTime(+t+1e3*n)},function(t,n){return(n-t)/1e3},function(t){return t.getUTCSeconds()}),Y_=I_.range,B_=Mu(function(t){t.setTime(Math.floor(t/O_)*O_)},function(t,n){t.setTime(+t+n*O_)},function(t,n){return(n-t)/O_},function(t){return t.getMinutes()}),j_=B_.range,H_=Mu(function(t){var n=t.getTimezoneOffset()*O_%36e5;n<0&&(n+=36e5),t.setTime(36e5*Math.floor((+t-n)/36e5)+n)},function(t,n){t.setTime(+t+36e5*n)},function(t,n){return(n-t)/36e5},function(t){return t.getHours()}),X_=H_.range,$_=Mu(function(t){t.setHours(0,0,0,0)},function(t,n){t.setDate(t.getDate()+n)},function(t,n){return(n-t-(n.getTimezoneOffset()-t.getTimezoneOffset())*O_)/864e5},function(t){return t.getDate()-1}),V_=$_.range,W_=Tu(0),Z_=Tu(1),G_=Tu(2),J_=Tu(3),Q_=Tu(4),K_=Tu(5),ty=Tu(6),ny=W_.range,ey=Z_.range,ry=G_.range,iy=J_.range,oy=Q_.range,uy=K_.range,ay=ty.range,cy=Mu(function(t){t.setDate(1),t.setHours(0,0,0,0)},function(t,n){t.setMonth(t.getMonth()+n)},function(t,n){return n.getMonth()-t.getMonth()+12*(n.getFullYear()-t.getFullYear())},function(t){return t.getMonth()}),sy=cy.range,fy=Mu(function(t){t.setMonth(0,1),t.setHours(0,0,0,0)},function(t,n){t.setFullYear(t.getFullYear()+n)},function(t,n){return n.getFullYear()-t.getFullYear()},function(t){return t.getFullYear()});fy.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Mu(function(n){n.setFullYear(Math.floor(n.getFullYear()/t)*t),n.setMonth(0,1),n.setHours(0,0,0,0)},function(n,e){n.setFullYear(n.getFullYear()+e*t)}):null};var ly=fy.range,hy=Mu(function(t){t.setUTCSeconds(0,0)},function(t,n){t.setTime(+t+n*O_)},function(t,n){return(n-t)/O_},function(t){return t.getUTCMinutes()}),py=hy.range,dy=Mu(function(t){t.setUTCMinutes(0,0,0)},function(t,n){t.setTime(+t+36e5*n)},function(t,n){return(n-t)/36e5},function(t){return t.getUTCHours()}),vy=dy.range,_y=Mu(function(t){t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCDate(t.getUTCDate()+n)},function(t,n){return(n-t)/864e5},function(t){return t.getUTCDate()-1}),yy=_y.range,gy=ku(0),my=ku(1),xy=ku(2),by=ku(3),wy=ku(4),My=ku(5),Ty=ku(6),ky=gy.range,Ny=my.range,Sy=xy.range,Ey=by.range,Ay=wy.range,Cy=My.range,zy=Ty.range,Py=Mu(function(t){t.setUTCDate(1),t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCMonth(t.getUTCMonth()+n)},function(t,n){return n.getUTCMonth()-t.getUTCMonth()+12*(n.getUTCFullYear()-t.getUTCFullYear())},function(t){return t.getUTCMonth()}),Ry=Py.range,Ly=Mu(function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCFullYear(t.getUTCFullYear()+n)},function(t,n){return n.getUTCFullYear()-t.getUTCFullYear()},function(t){return t.getUTCFullYear()});Ly.every=function(t){return 
isFinite(t=Math.floor(t))&&t>0?Mu(function(n){n.setUTCFullYear(Math.floor(n.getUTCFullYear()/t)*t),n.setUTCMonth(0,1),n.setUTCHours(0,0,0,0)},function(n,e){n.setUTCFullYear(n.getUTCFullYear()+e*t)}):null};var qy,Uy=Ly.range,Dy={"-":"",_:" ",0:"0"},Oy=/^\s*\d+/,Fy=/^%/,Iy=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g;Ma({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});var Yy=Date.prototype.toISOString?function(t){return t.toISOString()}:t.utcFormat("%Y-%m-%dT%H:%M:%S.%LZ"),By=+new Date("2000-01-01T00:00:00.000Z")?function(t){var n=new Date(t);return isNaN(n)?null:n}:t.utcParse("%Y-%m-%dT%H:%M:%S.%LZ"),jy=1e3,Hy=60*jy,Xy=60*Hy,$y=24*Xy,Vy=7*$y,Wy=30*$y,Zy=365*$y,Gy=function(t){return t.match(/.{6}/g).map(function(t){return"#"+t})},Jy=Gy("1f77b4ff7f0e2ca02cd627289467bd8c564be377c27f7f7fbcbd2217becf"),Qy=Gy("393b795254a36b6ecf9c9ede6379398ca252b5cf6bcedb9c8c6d31bd9e39e7ba52e7cb94843c39ad494ad6616be7969c7b4173a55194ce6dbdde9ed6"),Ky=Gy("3182bd6baed69ecae1c6dbefe6550dfd8d3cfdae6bfdd0a231a35474c476a1d99bc7e9c0756bb19e9ac8bcbddcdadaeb636363969696bdbdbdd9d9d9"),tg=Gy("1f77b4aec7e8ff7f0effbb782ca02c98df8ad62728ff98969467bdc5b0d58c564bc49c94e377c2f7b6d27f7f7fc7c7c7bcbd22dbdb8d17becf9edae5"),ng=wl($t(300,.5,0),$t(-240,.5,1)),eg=wl($t(-100,.75,.35),$t(80,1.5,.8)),rg=wl($t(260,.75,.35),$t(80,1.5,.8)),ig=$t(),og=Sa(Gy("44015444025645045745055946075a46085c460a5d460b5e470d60470e6147106347116447136548146748166848176948186a481a6c481b6d481c6e481d6f481f70482071482173482374482475482576482677482878482979472a7a472c7a472d7b472e7c472f7d46307e46327e46337f463480453581453781453882443983443a83443b84433d84433e85423f854240864241864142874144874045884046883f47883f48893e49893e4a893e4c8a3d4d8a3d4e8a3c4f8a3c508b3b518b3b528b3a538b3a548c39558c39568c38588c38598c375a8c375b8d365c8d365d8d355e8d355f8d34608d34618d33628d33638d32648e32658e31668e31678e31688e30698e306a8e2f6b8e2f6c8e2e6d8e2e6e8e2e6f8e2d708e2d718e2c718e2c728e2c738e2b748e2b758e2a768e2a778e2a788e29798e297a8e297b8e287c8e287d8e277e8e277f8e27808e26818e26828e26828e25838e25848e25858e24868e24878e23888e23898e238a8d228b8d228c8d228d8d218e8d218f8d21908d21918c20928c20928c20938c1f948c1f958b1f968b1f978b1f988b1f998a1f9a8a1e9b8a1e9c891e9d891f9e891f9f881fa0881fa1881fa1871fa28720a38620a48621a58521a68522a78522a88423a98324aa8325ab8225ac8226ad8127ad8128ae8029af7f2ab07f2cb17e2db27d2eb37c2fb47c31b57b32b67a34b67935b77937b87838b9773aba763bbb753dbc743fbc7340bd7242be7144bf7046c06f48c16e4ac16d4cc26c4ec36b50c46a52c56954c56856c66758c7655ac8645cc8635ec96260ca6063cb5f65cb5e67cc5c69cd5b6ccd5a6ece5870cf5773d05675d05477d1537ad1517cd2507fd34e81d34d84d44b86d54989d5488bd6468ed64590d74393d74195d84098d83e9bd93c9dd93ba0da39a2da37a5db36a8db34aadc32addc30b0dd2fb2dd2db5de2bb8de29bade28bddf26c0df25c2df23c5e021c8e020cae11fcde11dd0e11cd2e21bd5e21ad8e219dae319dde318dfe318e2e418e5e419e7e419eae51aece51befe51cf1e51df4e61ef6e620f8e621fbe723fde725")),ug=Sa(Gy("00000401000501010601010802010902020b02020d03030f03031204041405041606051806051a07061c08071e0907200a08220b09240c09260d0a290e0b2b100b2d110c2f120d31130d34140e36150e38160f3b180f3d19103f1a10421c10441d11471e114920114b21114e22115024125325125527125829115a2a115c2c115f2d11612f116331116533106734106936106b38106c390f6e3b0f703d0f713f0f72
400f74420f75440f764510774710784910784a10794c117a4e117b4f127b51127c52137c54137d56147d57157e59157e5a167e5c167f5d177f5f187f601880621980641a80651a80671b80681c816a1c816b1d816d1d816e1e81701f81721f817320817521817621817822817922827b23827c23827e24828025828125818326818426818627818827818928818b29818c29818e2a81902a81912b81932b80942c80962c80982d80992d809b2e7f9c2e7f9e2f7fa02f7fa1307ea3307ea5317ea6317da8327daa337dab337cad347cae347bb0357bb2357bb3367ab5367ab73779b83779ba3878bc3978bd3977bf3a77c03a76c23b75c43c75c53c74c73d73c83e73ca3e72cc3f71cd4071cf4070d0416fd2426fd3436ed5446dd6456cd8456cd9466bdb476adc4869de4968df4a68e04c67e24d66e34e65e44f64e55064e75263e85362e95462ea5661eb5760ec5860ed5a5fee5b5eef5d5ef05f5ef1605df2625df2645cf3655cf4675cf4695cf56b5cf66c5cf66e5cf7705cf7725cf8745cf8765cf9785df9795df97b5dfa7d5efa7f5efa815ffb835ffb8560fb8761fc8961fc8a62fc8c63fc8e64fc9065fd9266fd9467fd9668fd9869fd9a6afd9b6bfe9d6cfe9f6dfea16efea36ffea571fea772fea973feaa74feac76feae77feb078feb27afeb47bfeb67cfeb77efeb97ffebb81febd82febf84fec185fec287fec488fec68afec88cfeca8dfecc8ffecd90fecf92fed194fed395fed597fed799fed89afdda9cfddc9efddea0fde0a1fde2a3fde3a5fde5a7fde7a9fde9aafdebacfcecaefceeb0fcf0b2fcf2b4fcf4b6fcf6b8fcf7b9fcf9bbfcfbbdfcfdbf")),ag=Sa(Gy("00000401000501010601010802010a02020c02020e03021004031204031405041706041907051b08051d09061f0a07220b07240c08260d08290e092b10092d110a30120a32140b34150b37160b39180c3c190c3e1b0c411c0c431e0c451f0c48210c4a230c4c240c4f260c51280b53290b552b0b572d0b592f0a5b310a5c320a5e340a5f3609613809623909633b09643d09653e0966400a67420a68440a68450a69470b6a490b6a4a0c6b4c0c6b4d0d6c4f0d6c510e6c520e6d540f6d550f6d57106e59106e5a116e5c126e5d126e5f136e61136e62146e64156e65156e67166e69166e6a176e6c186e6d186e6f196e71196e721a6e741a6e751b6e771c6d781c6d7a1d6d7c1d6d7d1e6d7f1e6c801f6c82206c84206b85216b87216b88226a8a226a8c23698d23698f24699025689225689326679526679727669827669a28659b29649d29649f2a63a02a63a22b62a32c61a52c60a62d60a82e5fa92e5eab2f5ead305dae305cb0315bb1325ab3325ab43359b63458b73557b93556ba3655bc3754bd3853bf3952c03a51c13a50c33b4fc43c4ec63d4dc73e4cc83f4bca404acb4149cc4248ce4347cf4446d04545d24644d34743d44842d54a41d74b3fd84c3ed94d3dda4e3cdb503bdd513ade5238df5337e05536e15635e25734e35933e45a31e55c30e65d2fe75e2ee8602de9612bea632aeb6429eb6628ec6726ed6925ee6a24ef6c23ef6e21f06f20f1711ff1731df2741cf3761bf37819f47918f57b17f57d15f67e14f68013f78212f78410f8850ff8870ef8890cf98b0bf98c0af98e09fa9008fa9207fa9407fb9606fb9706fb9906fb9b06fb9d07fc9f07fca108fca309fca50afca60cfca80dfcaa0ffcac11fcae12fcb014fcb216fcb418fbb61afbb81dfbba1ffbbc21fbbe23fac026fac228fac42afac62df9c72ff9c932f9cb35f8cd37f8cf3af7d13df7d340f6d543f6d746f5d949f5db4cf4dd4ff4df53f4e156f3e35af3e55df2e661f2e865f2ea69f1ec6df1ed71f1ef75f1f179f2f27df2f482f3f586f3f68af4f88ef5f992f6fa96f8fb9af9fc9dfafda1fcffa4")),cg=Sa(Gy("0d088710078813078916078a19068c1b068d1d068e20068f2206902406912605912805922a05932c05942e05952f059631059733059735049837049938049a3a049a3c049b3e049c3f049c41049d43039e44039e46039f48039f4903a04b03a14c02a14e02a25002a25102a35302a35502a45601a45801a45901a55b01a55c01a65e01a66001a66100a76300a76400a76600a76700a86900a86a00a86c00a86e00a86f00a87100a87201a87401a87501a87701a87801a87a02a87b02a87d03a87e03a88004a88104a78305a78405a78606a68707a68808a68a09a58b0aa58d0ba58e0ca48f0da4910ea3920fa39410a29511a19613a19814a099159f9a169f9c179e9d189d9e199da01a9ca11b9ba21d9aa31e9aa51f99a62098a72197a82296aa2395ab2494ac2694ad2793ae2892b02991b12a90b22b8fb32c8eb42e8db52f8cb6308bb7318ab83289ba3388bb3488bc3587bd3786be3885bf3984c03a83c13b82c23c81c33d80c43e7fc5407ec6417dc7427cc8437bc9447aca457acb4679cc4778cc497
7cd4a76ce4b75cf4c74d04d73d14e72d24f71d35171d45270d5536fd5546ed6556dd7566cd8576bd9586ada5a6ada5b69db5c68dc5d67dd5e66de5f65de6164df6263e06363e16462e26561e26660e3685fe4695ee56a5de56b5de66c5ce76e5be76f5ae87059e97158e97257ea7457eb7556eb7655ec7754ed7953ed7a52ee7b51ef7c51ef7e50f07f4ff0804ef1814df1834cf2844bf3854bf3874af48849f48948f58b47f58c46f68d45f68f44f79044f79143f79342f89441f89540f9973ff9983ef99a3efa9b3dfa9c3cfa9e3bfb9f3afba139fba238fca338fca537fca636fca835fca934fdab33fdac33fdae32fdaf31fdb130fdb22ffdb42ffdb52efeb72dfeb82cfeba2cfebb2bfebd2afebe2afec029fdc229fdc328fdc527fdc627fdc827fdca26fdcb26fccd25fcce25fcd025fcd225fbd324fbd524fbd724fad824fada24f9dc24f9dd25f8df25f8e125f7e225f7e425f6e626f6e826f5e926f5eb27f4ed27f3ee27f3f027f2f227f1f426f1f525f0f724f0f921")),sg=function(t){return function(){return t}},fg=Math.abs,lg=Math.atan2,hg=Math.cos,pg=Math.max,dg=Math.min,vg=Math.sin,_g=Math.sqrt,yg=1e-12,gg=Math.PI,mg=gg/2,xg=2*gg;Oa.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;default:this._context.lineTo(t,n)}}};var bg=function(t){return new Oa(t)},wg=function(){function t(t){var a,c,s,f=t.length,l=!1;for(null==i&&(u=o(s=ve())),a=0;a<=f;++a)!(a=f;--l)s.point(_[l],y[l]);s.lineEnd(),s.areaEnd()}v&&(_[n]=+e(h,n,t),y[n]=+i(h,n,t),s.point(r?+r(h,n,t):_[n],o?+o(h,n,t):y[n]))}if(p)return s=null,p+""||null}function n(){return wg().defined(u).curve(c).context(a)}var e=Fa,r=null,i=sg(0),o=Ia,u=sg(!0),a=null,c=bg,s=null;return t.x=function(n){return arguments.length?(e="function"==typeof n?n:sg(+n),r=null,t):e},t.x0=function(n){return arguments.length?(e="function"==typeof n?n:sg(+n),t):e},t.x1=function(n){return arguments.length?(r=null==n?null:"function"==typeof n?n:sg(+n),t):r},t.y=function(n){return arguments.length?(i="function"==typeof n?n:sg(+n),o=null,t):i},t.y0=function(n){return arguments.length?(i="function"==typeof n?n:sg(+n),t):i},t.y1=function(n){return arguments.length?(o=null==n?null:"function"==typeof n?n:sg(+n),t):o},t.lineX0=t.lineY0=function(){return n().x(e).y(i)},t.lineY1=function(){return n().x(e).y(o)},t.lineX1=function(){return n().x(r).y(i)},t.defined=function(n){return arguments.length?(u="function"==typeof n?n:sg(!!n),t):u},t.curve=function(n){return arguments.length?(c=n,null!=a&&(s=c(a)),t):c},t.context=function(n){return arguments.length?(null==n?a=s=null:s=c(a=n),t):a},t},Tg=function(t,n){return nt?1:n>=t?0:NaN},kg=function(t){return t},Ng=Ba(bg);Ya.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(t,n){this._curve.point(n*Math.sin(t),n*-Math.cos(t))}};var Sg=function(){return ja(wg().curve(Ng))},Eg=function(){var t=Mg().curve(Ng),n=t.curve,e=t.lineX0,r=t.lineX1,i=t.lineY0,o=t.lineY1;return t.angle=t.x,delete t.x,t.startAngle=t.x0,delete t.x0,t.endAngle=t.x1,delete t.x1,t.radius=t.y,delete t.y,t.innerRadius=t.y0,delete t.y0,t.outerRadius=t.y1,delete t.y1,t.lineStartAngle=function(){return ja(e())},delete t.lineX0,t.lineEndAngle=function(){return ja(r())},delete t.lineX1,t.lineInnerRadius=function(){return ja(i())},delete t.lineY0,t.lineOuterRadius=function(){return ja(o())},delete 
[Bundled minified d3 v4 continues here as a single line: the shape module (line/area generators; curve factories curveBasis, curveBundle, curveCardinal, curveCatmullRom, curveMonotoneX/Y, curveNatural, curveStep; symbol types circle, cross, diamond, square, star, triangle, wye), arc/pie/stack generators, Voronoi diagrams, the hierarchy layouts (cluster, tree, treemap, pack, partition), geo projections and streams (geoAlbersUsa, geoMercator, geoOrthographic, geoTransverseMercator, ...), scales and interpolators, and the drag and zoom behaviours. This is a stock upstream d3 build; its one-line minified source is omitted here.]
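For orientation, the API surface this blob exports is the standard d3 v4 one. Below is a minimal sketch (not from the patch) of the shape generators it defines; it assumes d3 is loaded globally and that an SVG element with id "chart" exists on the page — both are illustrative:

    // Draw a smoothed line through four points using the bundled curve factories.
    var data = [[0, 10], [30, 40], [60, 20], [90, 55]];

    var line = d3.line()
        .x(function (d) { return d[0]; })   // pixel x taken from the datum
        .y(function (d) { return d[1]; })   // pixel y taken from the datum
        .curve(d3.curveBasis);              // one of the curve factories in the blob above

    d3.select("#chart").append("path")
        .attr("d", line(data))              // line(data) returns an SVG path string
        .attr("fill", "none")
        .attr("stroke", "steelblue");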
 
[Bundled minified daterangepicker (the Bootstrap date-range-picker jQuery plugin) follows as one-line blobs: a UMD wrapper resolving moment and jQuery, and a prototype carrying setOptions, setStartDate/setEndDate, updateView/updateFormInputs/updateFromControl, show/hide/toggle with outside-click handling, range-list handling (clickRange/enterRange), calendar navigation (clickPrev/clickNext/clickDate/hoverDate), time-picker handling (updateTime), updateCalendars/buildCalendar/renderDropdowns/renderCalendar, and the jQuery plugin registration i.fn.daterangepicker. The hunk removes the previous minified copy (which ended without a trailing newline) and adds a replacement copy that is identical in the portion shown; both one-line blobs are omitted here.]
';r+='',r+="",r+="",this.showWeekNumbers&&(r+=""),r+=!a||a.isBefore(t.firstDay)?'':"";var o=this.locale.monthNames[t[1][1].month()]+t[1][1].format(" YYYY");this.showDropdowns&&(o=this.renderDropdowns(t[1][1],a,s)),r+='",r+=!s||s.isAfter(t.lastDay)?'':"",r+="",r+="",this.showWeekNumbers&&(r+='"),i.each(this.locale.daysOfWeek,function(t,e){r+=""}),r+="",r+="",r+="";for(var h=0;6>h;h++){r+="",this.showWeekNumbers&&(r+='");for(var l=0;7>l;l++){var c="available ";c+=t[h][l].month()==t[1][1].month()?"":"off",a&&t[h][l].isBefore(a,"day")||s&&t[h][l].isAfter(s,"day")?c=" off disabled ":t[h][l].format("YYYY-MM-DD")==e.format("YYYY-MM-DD")?(c+=" active ",t[h][l].format("YYYY-MM-DD")==this.startDate.format("YYYY-MM-DD")&&(c+=" start-date "),t[h][l].format("YYYY-MM-DD")==this.endDate.format("YYYY-MM-DD")&&(c+=" end-date ")):t[h][l]>=this.startDate&&t[h][l]<=this.endDate&&(c+=" in-range ",t[h][l].isSame(this.startDate)&&(c+=" start-date "),t[h][l].isSame(this.endDate)&&(c+=" end-date "));var d="r"+h+"c"+l;r+='"}r+=""}r+="",r+="
'+o+"
'+this.locale.weekLabel+""+e+"
'+t[h][0].week()+"'+t[h][l].date()+"
",r+="
";var f;if(this.timePicker){r+='
',r+=' : ",r+=' ",this.timePickerSeconds){for(r+=': "}if(this.timePicker12Hour){r+='"}r+="
"}return r},remove:function(){this.container.remove(),this.element.off(".daterangepicker"),this.element.removeData("daterangepicker")}},i.fn.daterangepicker=function(t,e){return this.each(function(){var a=i(this);a.data("daterangepicker")&&a.data("daterangepicker").remove(),a.data("daterangepicker",new s(a,t,e))}),this}}); diff --git a/ui/js/moment/moment.min.js b/ui/js/moment/moment.min.js index 024d488f..c7f6dcd4 100755 --- a/ui/js/moment/moment.min.js +++ b/ui/js/moment/moment.min.js @@ -4,4 +4,4 @@ //! license : MIT //! momentjs.com (function(a){function b(a,b,c){switch(arguments.length){case 2:return null!=a?a:b;case 3:return null!=a?a:null!=b?b:c;default:throw new Error("Implement me")}}function c(a,b){return Bb.call(a,b)}function d(){return{empty:!1,unusedTokens:[],unusedInput:[],overflow:-2,charsLeftOver:0,nullInput:!1,invalidMonth:null,invalidFormat:!1,userInvalidated:!1,iso:!1}}function e(a){vb.suppressDeprecationWarnings===!1&&"undefined"!=typeof console&&console.warn&&console.warn("Deprecation warning: "+a)}function f(a,b){var c=!0;return o(function(){return c&&(e(a),c=!1),b.apply(this,arguments)},b)}function g(a,b){sc[a]||(e(b),sc[a]=!0)}function h(a,b){return function(c){return r(a.call(this,c),b)}}function i(a,b){return function(c){return this.localeData().ordinal(a.call(this,c),b)}}function j(a,b){var c,d,e=12*(b.year()-a.year())+(b.month()-a.month()),f=a.clone().add(e,"months");return 0>b-f?(c=a.clone().add(e-1,"months"),d=(b-f)/(f-c)):(c=a.clone().add(e+1,"months"),d=(b-f)/(c-f)),-(e+d)}function k(a,b,c){var d;return null==c?b:null!=a.meridiemHour?a.meridiemHour(b,c):null!=a.isPM?(d=a.isPM(c),d&&12>b&&(b+=12),d||12!==b||(b=0),b):b}function l(){}function m(a,b){b!==!1&&H(a),p(this,a),this._d=new Date(+a._d),uc===!1&&(uc=!0,vb.updateOffset(this),uc=!1)}function n(a){var b=A(a),c=b.year||0,d=b.quarter||0,e=b.month||0,f=b.week||0,g=b.day||0,h=b.hour||0,i=b.minute||0,j=b.second||0,k=b.millisecond||0;this._milliseconds=+k+1e3*j+6e4*i+36e5*h,this._days=+g+7*f,this._months=+e+3*d+12*c,this._data={},this._locale=vb.localeData(),this._bubble()}function o(a,b){for(var d in b)c(b,d)&&(a[d]=b[d]);return c(b,"toString")&&(a.toString=b.toString),c(b,"valueOf")&&(a.valueOf=b.valueOf),a}function p(a,b){var c,d,e;if("undefined"!=typeof b._isAMomentObject&&(a._isAMomentObject=b._isAMomentObject),"undefined"!=typeof b._i&&(a._i=b._i),"undefined"!=typeof b._f&&(a._f=b._f),"undefined"!=typeof b._l&&(a._l=b._l),"undefined"!=typeof b._strict&&(a._strict=b._strict),"undefined"!=typeof b._tzm&&(a._tzm=b._tzm),"undefined"!=typeof b._isUTC&&(a._isUTC=b._isUTC),"undefined"!=typeof b._offset&&(a._offset=b._offset),"undefined"!=typeof b._pf&&(a._pf=b._pf),"undefined"!=typeof b._locale&&(a._locale=b._locale),Kb.length>0)for(c in Kb)d=Kb[c],e=b[d],"undefined"!=typeof e&&(a[d]=e);return a}function q(a){return 0>a?Math.ceil(a):Math.floor(a)}function r(a,b,c){for(var d=""+Math.abs(a),e=a>=0;d.lengthd;d++)(c&&a[d]!==b[d]||!c&&C(a[d])!==C(b[d]))&&g++;return g+f}function z(a){if(a){var b=a.toLowerCase().replace(/(.)s$/,"$1");a=lc[a]||mc[b]||b}return a}function A(a){var b,d,e={};for(d in a)c(a,d)&&(b=z(d),b&&(e[b]=a[d]));return e}function B(b){var c,d;if(0===b.indexOf("week"))c=7,d="day";else{if(0!==b.indexOf("month"))return;c=12,d="month"}vb[b]=function(e,f){var g,h,i=vb._locale[b],j=[];if("number"==typeof e&&(f=e,e=a),h=function(a){var b=vb().utc().set(d,a);return i.call(vb._locale,b,e||"")},null!=f)return h(f);for(g=0;c>g;g++)j.push(h(g));return j}}function C(a){var b=+a,c=0;return 
0!==b&&isFinite(b)&&(c=b>=0?Math.floor(b):Math.ceil(b)),c}function D(a,b){return new Date(Date.UTC(a,b+1,0)).getUTCDate()}function E(a,b,c){return jb(vb([a,11,31+b-c]),b,c).week}function F(a){return G(a)?366:365}function G(a){return a%4===0&&a%100!==0||a%400===0}function H(a){var b;a._a&&-2===a._pf.overflow&&(b=a._a[Db]<0||a._a[Db]>11?Db:a._a[Eb]<1||a._a[Eb]>D(a._a[Cb],a._a[Db])?Eb:a._a[Fb]<0||a._a[Fb]>24||24===a._a[Fb]&&(0!==a._a[Gb]||0!==a._a[Hb]||0!==a._a[Ib])?Fb:a._a[Gb]<0||a._a[Gb]>59?Gb:a._a[Hb]<0||a._a[Hb]>59?Hb:a._a[Ib]<0||a._a[Ib]>999?Ib:-1,a._pf._overflowDayOfYear&&(Cb>b||b>Eb)&&(b=Eb),a._pf.overflow=b)}function I(b){return null==b._isValid&&(b._isValid=!isNaN(b._d.getTime())&&b._pf.overflow<0&&!b._pf.empty&&!b._pf.invalidMonth&&!b._pf.nullInput&&!b._pf.invalidFormat&&!b._pf.userInvalidated,b._strict&&(b._isValid=b._isValid&&0===b._pf.charsLeftOver&&0===b._pf.unusedTokens.length&&b._pf.bigHour===a)),b._isValid}function J(a){return a?a.toLowerCase().replace("_","-"):a}function K(a){for(var b,c,d,e,f=0;f0;){if(d=L(e.slice(0,b).join("-")))return d;if(c&&c.length>=b&&y(e,c,!0)>=b-1)break;b--}f++}return null}function L(a){var b=null;if(!Jb[a]&&Lb)try{b=vb.locale(),require("./locale/"+a),vb.locale(b)}catch(c){}return Jb[a]}function M(a,b){var c,d;return b._isUTC?(c=b.clone(),d=(vb.isMoment(a)||x(a)?+a:+vb(a))-+c,c._d.setTime(+c._d+d),vb.updateOffset(c,!1),c):vb(a).local()}function N(a){return a.match(/\[[\s\S]/)?a.replace(/^\[|\]$/g,""):a.replace(/\\/g,"")}function O(a){var b,c,d=a.match(Pb);for(b=0,c=d.length;c>b;b++)d[b]=rc[d[b]]?rc[d[b]]:N(d[b]);return function(e){var f="";for(b=0;c>b;b++)f+=d[b]instanceof Function?d[b].call(e,a):d[b];return f}}function P(a,b){return a.isValid()?(b=Q(b,a.localeData()),nc[b]||(nc[b]=O(b)),nc[b](a)):a.localeData().invalidDate()}function Q(a,b){function c(a){return b.longDateFormat(a)||a}var d=5;for(Qb.lastIndex=0;d>=0&&Qb.test(a);)a=a.replace(Qb,c),Qb.lastIndex=0,d-=1;return a}function R(a,b){var c,d=b._strict;switch(a){case"Q":return _b;case"DDDD":return bc;case"YYYY":case"GGGG":case"gggg":return d?cc:Tb;case"Y":case"G":case"g":return ec;case"YYYYYY":case"YYYYY":case"GGGGG":case"ggggg":return d?dc:Ub;case"S":if(d)return _b;case"SS":if(d)return ac;case"SSS":if(d)return bc;case"DDD":return Sb;case"MMM":case"MMMM":case"dd":case"ddd":case"dddd":return Wb;case"a":case"A":return b._locale._meridiemParse;case"x":return Zb;case"X":return $b;case"Z":case"ZZ":return Xb;case"T":return Yb;case"SSSS":return Vb;case"MM":case"DD":case"YY":case"GG":case"gg":case"HH":case"hh":case"mm":case"ss":case"ww":case"WW":return d?ac:Rb;case"M":case"D":case"d":case"H":case"h":case"m":case"s":case"w":case"W":case"e":case"E":return Rb;case"Do":return d?b._locale._ordinalParse:b._locale._ordinalParseLenient;default:return c=new RegExp($(Z(a.replace("\\","")),"i"))}}function S(a){a=a||"";var b=a.match(Xb)||[],c=b[b.length-1]||[],d=(c+"").match(jc)||["-",0,0],e=+(60*d[1])+C(d[2]);return"+"===d[0]?e:-e}function T(a,b,c){var 
d,e=c._a;switch(a){case"Q":null!=b&&(e[Db]=3*(C(b)-1));break;case"M":case"MM":null!=b&&(e[Db]=C(b)-1);break;case"MMM":case"MMMM":d=c._locale.monthsParse(b,a,c._strict),null!=d?e[Db]=d:c._pf.invalidMonth=b;break;case"D":case"DD":null!=b&&(e[Eb]=C(b));break;case"Do":null!=b&&(e[Eb]=C(parseInt(b.match(/\d{1,2}/)[0],10)));break;case"DDD":case"DDDD":null!=b&&(c._dayOfYear=C(b));break;case"YY":e[Cb]=vb.parseTwoDigitYear(b);break;case"YYYY":case"YYYYY":case"YYYYYY":e[Cb]=C(b);break;case"a":case"A":c._meridiem=b;break;case"h":case"hh":c._pf.bigHour=!0;case"H":case"HH":e[Fb]=C(b);break;case"m":case"mm":e[Gb]=C(b);break;case"s":case"ss":e[Hb]=C(b);break;case"S":case"SS":case"SSS":case"SSSS":e[Ib]=C(1e3*("0."+b));break;case"x":c._d=new Date(C(b));break;case"X":c._d=new Date(1e3*parseFloat(b));break;case"Z":case"ZZ":c._useUTC=!0,c._tzm=S(b);break;case"dd":case"ddd":case"dddd":d=c._locale.weekdaysParse(b),null!=d?(c._w=c._w||{},c._w.d=d):c._pf.invalidWeekday=b;break;case"w":case"ww":case"W":case"WW":case"d":case"e":case"E":a=a.substr(0,1);case"gggg":case"GGGG":case"GGGGG":a=a.substr(0,2),b&&(c._w=c._w||{},c._w[a]=C(b));break;case"gg":case"GG":c._w=c._w||{},c._w[a]=vb.parseTwoDigitYear(b)}}function U(a){var c,d,e,f,g,h,i;c=a._w,null!=c.GG||null!=c.W||null!=c.E?(g=1,h=4,d=b(c.GG,a._a[Cb],jb(vb(),1,4).year),e=b(c.W,1),f=b(c.E,1)):(g=a._locale._week.dow,h=a._locale._week.doy,d=b(c.gg,a._a[Cb],jb(vb(),g,h).year),e=b(c.w,1),null!=c.d?(f=c.d,g>f&&++e):f=null!=c.e?c.e+g:g),i=kb(d,e,f,h,g),a._a[Cb]=i.year,a._dayOfYear=i.dayOfYear}function V(a){var c,d,e,f,g=[];if(!a._d){for(e=X(a),a._w&&null==a._a[Eb]&&null==a._a[Db]&&U(a),a._dayOfYear&&(f=b(a._a[Cb],e[Cb]),a._dayOfYear>F(f)&&(a._pf._overflowDayOfYear=!0),d=fb(f,0,a._dayOfYear),a._a[Db]=d.getUTCMonth(),a._a[Eb]=d.getUTCDate()),c=0;3>c&&null==a._a[c];++c)a._a[c]=g[c]=e[c];for(;7>c;c++)a._a[c]=g[c]=null==a._a[c]?2===c?1:0:a._a[c];24===a._a[Fb]&&0===a._a[Gb]&&0===a._a[Hb]&&0===a._a[Ib]&&(a._nextDay=!0,a._a[Fb]=0),a._d=(a._useUTC?fb:eb).apply(null,g),null!=a._tzm&&a._d.setUTCMinutes(a._d.getUTCMinutes()-a._tzm),a._nextDay&&(a._a[Fb]=24)}}function W(a){var b;a._d||(b=A(a._i),a._a=[b.year,b.month,b.day||b.date,b.hour,b.minute,b.second,b.millisecond],V(a))}function X(a){var b=new Date;return a._useUTC?[b.getUTCFullYear(),b.getUTCMonth(),b.getUTCDate()]:[b.getFullYear(),b.getMonth(),b.getDate()]}function Y(b){if(b._f===vb.ISO_8601)return void ab(b);b._a=[],b._pf.empty=!0;var c,d,e,f,g,h=""+b._i,i=h.length,j=0;for(e=Q(b._f,b._locale).match(Pb)||[],c=0;c0&&b._pf.unusedInput.push(g),h=h.slice(h.indexOf(d)+d.length),j+=d.length),rc[f]?(d?b._pf.empty=!1:b._pf.unusedTokens.push(f),T(f,d,b)):b._strict&&!d&&b._pf.unusedTokens.push(f);b._pf.charsLeftOver=i-j,h.length>0&&b._pf.unusedInput.push(h),b._pf.bigHour===!0&&b._a[Fb]<=12&&(b._pf.bigHour=a),b._a[Fb]=k(b._locale,b._a[Fb],b._meridiem),V(b),H(b)}function Z(a){return a.replace(/\\(\[)|\\(\])|\[([^\]\[]*)\]|\\(.)/g,function(a,b,c,d,e){return b||c||d||e})}function $(a){return a.replace(/[-\/\\^$*+?.()|[\]{}]/g,"\\$&")}function _(a){var b,c,e,f,g;if(0===a._f.length)return a._pf.invalidFormat=!0,void(a._d=new Date(0/0));for(f=0;fg)&&(e=g,c=b));o(a,c||b)}function ab(a){var b,c,d=a._i,e=fc.exec(d);if(e){for(a._pf.iso=!0,b=0,c=hc.length;c>b;b++)if(hc[b][1].exec(d)){a._f=hc[b][0]+(e[6]||" ");break}for(b=0,c=ic.length;c>b;b++)if(ic[b][1].exec(d)){a._f+=ic[b][0];break}d.match(Xb)&&(a._f+="Z"),Y(a)}else a._isValid=!1}function bb(a){ab(a),a._isValid===!1&&(delete a._isValid,vb.createFromInputFallback(a))}function cb(a,b){var 
c,d=[];for(c=0;ca&&h.setFullYear(a),h}function fb(a){var b=new Date(Date.UTC.apply(null,arguments));return 1970>a&&b.setUTCFullYear(a),b}function gb(a,b){if("string"==typeof a)if(isNaN(a)){if(a=b.weekdaysParse(a),"number"!=typeof a)return null}else a=parseInt(a,10);return a}function hb(a,b,c,d,e){return e.relativeTime(b||1,!!c,a,d)}function ib(a,b,c){var d=vb.duration(a).abs(),e=Ab(d.as("s")),f=Ab(d.as("m")),g=Ab(d.as("h")),h=Ab(d.as("d")),i=Ab(d.as("M")),j=Ab(d.as("y")),k=e0,k[4]=c,hb.apply({},k)}function jb(a,b,c){var d,e=c-b,f=c-a.day();return f>e&&(f-=7),e-7>f&&(f+=7),d=vb(a).add(f,"d"),{week:Math.ceil(d.dayOfYear()/7),year:d.year()}}function kb(a,b,c,d,e){var f,g,h=fb(a,0,1).getUTCDay();return h=0===h?7:h,c=null!=c?c:e,f=e-h+(h>d?7:0)-(e>h?7:0),g=7*(b-1)+(c-e)+f+1,{year:g>0?a:a-1,dayOfYear:g>0?g:F(a-1)+g}}function lb(b){var c,d=b._i,e=b._f;return b._locale=b._locale||vb.localeData(b._l),null===d||e===a&&""===d?vb.invalid({nullInput:!0}):("string"==typeof d&&(b._i=d=b._locale.preparse(d)),vb.isMoment(d)?new m(d,!0):(e?w(e)?_(b):Y(b):db(b),c=new m(b),c._nextDay&&(c.add(1,"d"),c._nextDay=a),c))}function mb(a,b){var c,d;if(1===b.length&&w(b[0])&&(b=b[0]),!b.length)return vb();for(c=b[0],d=1;d=0?"+":"-";return b+r(Math.abs(a),6)},gg:function(){return r(this.weekYear()%100,2)},gggg:function(){return r(this.weekYear(),4)},ggggg:function(){return r(this.weekYear(),5)},GG:function(){return r(this.isoWeekYear()%100,2)},GGGG:function(){return r(this.isoWeekYear(),4)},GGGGG:function(){return r(this.isoWeekYear(),5)},e:function(){return this.weekday()},E:function(){return this.isoWeekday()},a:function(){return this.localeData().meridiem(this.hours(),this.minutes(),!0)},A:function(){return this.localeData().meridiem(this.hours(),this.minutes(),!1)},H:function(){return this.hours()},h:function(){return this.hours()%12||12},m:function(){return this.minutes()},s:function(){return this.seconds()},S:function(){return C(this.milliseconds()/100)},SS:function(){return r(C(this.milliseconds()/10),2)},SSS:function(){return r(this.milliseconds(),3)},SSSS:function(){return r(this.milliseconds(),3)},Z:function(){var a=this.utcOffset(),b="+";return 0>a&&(a=-a,b="-"),b+r(C(a/60),2)+":"+r(C(a)%60,2)},ZZ:function(){var a=this.utcOffset(),b="+";return 0>a&&(a=-a,b="-"),b+r(C(a/60),2)+r(C(a)%60,2)},z:function(){return this.zoneAbbr()},zz:function(){return this.zoneName()},x:function(){return this.valueOf()},X:function(){return this.unix()},Q:function(){return this.quarter()}},sc={},tc=["months","monthsShort","weekdays","weekdaysShort","weekdaysMin"],uc=!1;pc.length;)xb=pc.pop(),rc[xb+"o"]=i(rc[xb],xb);for(;qc.length;)xb=qc.pop(),rc[xb+xb]=h(rc[xb],2);rc.DDDD=h(rc.DDD,3),o(l.prototype,{set:function(a){var b,c;for(c in a)b=a[c],"function"==typeof b?this[c]=b:this["_"+c]=b;this._ordinalParseLenient=new RegExp(this._ordinalParse.source+"|"+/\d{1,2}/.source)},_months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),months:function(a){return this._months[a.month()]},_monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),monthsShort:function(a){return this._monthsShort[a.month()]},monthsParse:function(a,b,c){var d,e,f;for(this._monthsParse||(this._monthsParse=[],this._longMonthsParse=[],this._shortMonthsParse=[]),d=0;12>d;d++){if(e=vb.utc([2e3,d]),c&&!this._longMonthsParse[d]&&(this._longMonthsParse[d]=new RegExp("^"+this.months(e,"").replace(".","")+"$","i"),this._shortMonthsParse[d]=new 
RegExp("^"+this.monthsShort(e,"").replace(".","")+"$","i")),c||this._monthsParse[d]||(f="^"+this.months(e,"")+"|^"+this.monthsShort(e,""),this._monthsParse[d]=new RegExp(f.replace(".",""),"i")),c&&"MMMM"===b&&this._longMonthsParse[d].test(a))return d;if(c&&"MMM"===b&&this._shortMonthsParse[d].test(a))return d;if(!c&&this._monthsParse[d].test(a))return d}},_weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdays:function(a){return this._weekdays[a.day()]},_weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysShort:function(a){return this._weekdaysShort[a.day()]},_weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),weekdaysMin:function(a){return this._weekdaysMin[a.day()]},weekdaysParse:function(a){var b,c,d;for(this._weekdaysParse||(this._weekdaysParse=[]),b=0;7>b;b++)if(this._weekdaysParse[b]||(c=vb([2e3,1]).day(b),d="^"+this.weekdays(c,"")+"|^"+this.weekdaysShort(c,"")+"|^"+this.weekdaysMin(c,""),this._weekdaysParse[b]=new RegExp(d.replace(".",""),"i")),this._weekdaysParse[b].test(a))return b},_longDateFormat:{LTS:"h:mm:ss A",LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY LT",LLLL:"dddd, MMMM D, YYYY LT"},longDateFormat:function(a){var b=this._longDateFormat[a];return!b&&this._longDateFormat[a.toUpperCase()]&&(b=this._longDateFormat[a.toUpperCase()].replace(/MMMM|MM|DD|dddd/g,function(a){return a.slice(1)}),this._longDateFormat[a]=b),b},isPM:function(a){return"p"===(a+"").toLowerCase().charAt(0)},_meridiemParse:/[ap]\.?m?\.?/i,meridiem:function(a,b,c){return a>11?c?"pm":"PM":c?"am":"AM"},_calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},calendar:function(a,b,c){var d=this._calendar[a];return"function"==typeof d?d.apply(b,[c]):d},_relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},relativeTime:function(a,b,c,d){var e=this._relativeTime[c];return"function"==typeof e?e(a,b,c,d):e.replace(/%d/i,a)},pastFuture:function(a,b){var c=this._relativeTime[a>0?"future":"past"];return"function"==typeof c?c(b):c.replace(/%s/i,b)},ordinal:function(a){return this._ordinal.replace("%d",a)},_ordinal:"%d",_ordinalParse:/\d{1,2}/,preparse:function(a){return a},postformat:function(a){return a},week:function(a){return jb(a,this._week.dow,this._week.doy).week},_week:{dow:0,doy:6},firstDayOfWeek:function(){return this._week.dow},firstDayOfYear:function(){return this._week.doy},_invalidDate:"Invalid date",invalidDate:function(){return this._invalidDate}}),vb=function(b,c,e,f){var g;return"boolean"==typeof e&&(f=e,e=a),g={},g._isAMomentObject=!0,g._i=b,g._f=c,g._l=e,g._strict=f,g._isUTC=!1,g._pf=d(),lb(g)},vb.suppressDeprecationWarnings=!1,vb.createFromInputFallback=f("moment construction falls back to js Date. This is discouraged and will be removed in upcoming major release. Please refer to https://github.com/moment/moment/issues/1407 for more info.",function(a){a._d=new Date(a._i+(a._useUTC?" 
UTC":""))}),vb.min=function(){var a=[].slice.call(arguments,0);return mb("isBefore",a)},vb.max=function(){var a=[].slice.call(arguments,0);return mb("isAfter",a)},vb.utc=function(b,c,e,f){var g;return"boolean"==typeof e&&(f=e,e=a),g={},g._isAMomentObject=!0,g._useUTC=!0,g._isUTC=!0,g._l=e,g._i=b,g._f=c,g._strict=f,g._pf=d(),lb(g).utc()},vb.unix=function(a){return vb(1e3*a)},vb.duration=function(a,b){var d,e,f,g,h=a,i=null;return vb.isDuration(a)?h={ms:a._milliseconds,d:a._days,M:a._months}:"number"==typeof a?(h={},b?h[b]=a:h.milliseconds=a):(i=Nb.exec(a))?(d="-"===i[1]?-1:1,h={y:0,d:C(i[Eb])*d,h:C(i[Fb])*d,m:C(i[Gb])*d,s:C(i[Hb])*d,ms:C(i[Ib])*d}):(i=Ob.exec(a))?(d="-"===i[1]?-1:1,f=function(a){var b=a&&parseFloat(a.replace(",","."));return(isNaN(b)?0:b)*d},h={y:f(i[2]),M:f(i[3]),d:f(i[4]),h:f(i[5]),m:f(i[6]),s:f(i[7]),w:f(i[8])}):null==h?h={}:"object"==typeof h&&("from"in h||"to"in h)&&(g=t(vb(h.from),vb(h.to)),h={},h.ms=g.milliseconds,h.M=g.months),e=new n(h),vb.isDuration(a)&&c(a,"_locale")&&(e._locale=a._locale),e},vb.version=yb,vb.defaultFormat=gc,vb.ISO_8601=function(){},vb.momentProperties=Kb,vb.updateOffset=function(){},vb.relativeTimeThreshold=function(b,c){return oc[b]===a?!1:c===a?oc[b]:(oc[b]=c,!0)},vb.lang=f("moment.lang is deprecated. Use moment.locale instead.",function(a,b){return vb.locale(a,b)}),vb.locale=function(a,b){var c;return a&&(c="undefined"!=typeof b?vb.defineLocale(a,b):vb.localeData(a),c&&(vb.duration._locale=vb._locale=c)),vb._locale._abbr},vb.defineLocale=function(a,b){return null!==b?(b.abbr=a,Jb[a]||(Jb[a]=new l),Jb[a].set(b),vb.locale(a),Jb[a]):(delete Jb[a],null)},vb.langData=f("moment.langData is deprecated. Use moment.localeData instead.",function(a){return vb.localeData(a)}),vb.localeData=function(a){var b;if(a&&a._locale&&a._locale._abbr&&(a=a._locale._abbr),!a)return vb._locale;if(!w(a)){if(b=L(a))return b;a=[a]}return K(a)},vb.isMoment=function(a){return a instanceof m||null!=a&&c(a,"_isAMomentObject")},vb.isDuration=function(a){return a instanceof n};for(xb=tc.length-1;xb>=0;--xb)B(tc[xb]);vb.normalizeUnits=function(a){return z(a)},vb.invalid=function(a){var b=vb.utc(0/0);return null!=a?o(b._pf,a):b._pf.userInvalidated=!0,b},vb.parseZone=function(){return vb.apply(null,arguments).parseZone()},vb.parseTwoDigitYear=function(a){return C(a)+(C(a)>68?1900:2e3)},vb.isDate=x,o(vb.fn=m.prototype,{clone:function(){return vb(this)},valueOf:function(){return+this._d-6e4*(this._offset||0)},unix:function(){return Math.floor(+this/1e3)},toString:function(){return this.clone().locale("en").format("ddd MMM DD YYYY HH:mm:ss [GMT]ZZ")},toDate:function(){return this._offset?new Date(+this):this._d},toISOString:function(){var a=vb(this).utc();return 00:!1},parsingFlags:function(){return o({},this._pf)},invalidAt:function(){return this._pf.overflow},utc:function(a){return this.utcOffset(0,a)},local:function(a){return this._isUTC&&(this.utcOffset(0,a),this._isUTC=!1,a&&this.subtract(this._dateUtcOffset(),"m")),this},format:function(a){var b=P(this,a||vb.defaultFormat);return this.localeData().postformat(b)},add:u(1,"add"),subtract:u(-1,"subtract"),diff:function(a,b,c){var d,e,f=M(a,this),g=6e4*(f.utcOffset()-this.utcOffset());return b=z(b),"year"===b||"month"===b||"quarter"===b?(e=j(this,f),"quarter"===b?e/=3:"year"===b&&(e/=12)):(d=this-f,e="second"===b?d/1e3:"minute"===b?d/6e4:"hour"===b?d/36e5:"day"===b?(d-g)/864e5:"week"===b?(d-g)/6048e5:d),c?e:q(e)},from:function(a,b){return 
vb.duration({to:this,from:a}).locale(this.locale()).humanize(!b)},fromNow:function(a){return this.from(vb(),a)},calendar:function(a){var b=a||vb(),c=M(b,this).startOf("day"),d=this.diff(c,"days",!0),e=-6>d?"sameElse":-1>d?"lastWeek":0>d?"lastDay":1>d?"sameDay":2>d?"nextDay":7>d?"nextWeek":"sameElse";return this.format(this.localeData().calendar(e,this,vb(b)))},isLeapYear:function(){return G(this.year())},isDST:function(){return this.utcOffset()>this.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()},day:function(a){var b=this._isUTC?this._d.getUTCDay():this._d.getDay();return null!=a?(a=gb(a,this.localeData()),this.add(a-b,"d")):b},month:qb("Month",!0),startOf:function(a){switch(a=z(a)){case"year":this.month(0);case"quarter":case"month":this.date(1);case"week":case"isoWeek":case"day":this.hours(0);case"hour":this.minutes(0);case"minute":this.seconds(0);case"second":this.milliseconds(0)}return"week"===a?this.weekday(0):"isoWeek"===a&&this.isoWeekday(1),"quarter"===a&&this.month(3*Math.floor(this.month()/3)),this},endOf:function(b){return b=z(b),b===a||"millisecond"===b?this:this.startOf(b).add(1,"isoWeek"===b?"week":b).subtract(1,"ms")},isAfter:function(a,b){var c;return b=z("undefined"!=typeof b?b:"millisecond"),"millisecond"===b?(a=vb.isMoment(a)?a:vb(a),+this>+a):(c=vb.isMoment(a)?+a:+vb(a),c<+this.clone().startOf(b))},isBefore:function(a,b){var c;return b=z("undefined"!=typeof b?b:"millisecond"),"millisecond"===b?(a=vb.isMoment(a)?a:vb(a),+a>+this):(c=vb.isMoment(a)?+a:+vb(a),+this.clone().endOf(b)a?this:a}),max:f("moment().max is deprecated, use moment.max instead. https://github.com/moment/moment/issues/1548",function(a){return a=vb.apply(null,arguments),a>this?this:a}),zone:f("moment().zone is deprecated, use moment().utcOffset instead. 
https://github.com/moment/moment/issues/1779",function(a,b){return null!=a?("string"!=typeof a&&(a=-a),this.utcOffset(a,b),this):-this.utcOffset()}),utcOffset:function(a,b){var c,d=this._offset||0;return null!=a?("string"==typeof a&&(a=S(a)),Math.abs(a)<16&&(a=60*a),!this._isUTC&&b&&(c=this._dateUtcOffset()),this._offset=a,this._isUTC=!0,null!=c&&this.add(c,"m"),d!==a&&(!b||this._changeInProgress?v(this,vb.duration(a-d,"m"),1,!1):this._changeInProgress||(this._changeInProgress=!0,vb.updateOffset(this,!0),this._changeInProgress=null)),this):this._isUTC?d:this._dateUtcOffset()},isLocal:function(){return!this._isUTC},isUtcOffset:function(){return this._isUTC},isUtc:function(){return this._isUTC&&0===this._offset},zoneAbbr:function(){return this._isUTC?"UTC":""},zoneName:function(){return this._isUTC?"Coordinated Universal Time":""},parseZone:function(){return this._tzm?this.utcOffset(this._tzm):"string"==typeof this._i&&this.utcOffset(S(this._i)),this},hasAlignedHourOffset:function(a){return a=a?vb(a).utcOffset():0,(this.utcOffset()-a)%60===0},daysInMonth:function(){return D(this.year(),this.month())},dayOfYear:function(a){var b=Ab((vb(this).startOf("day")-vb(this).startOf("year"))/864e5)+1;return null==a?b:this.add(a-b,"d")},quarter:function(a){return null==a?Math.ceil((this.month()+1)/3):this.month(3*(a-1)+this.month()%3)},weekYear:function(a){var b=jb(this,this.localeData()._week.dow,this.localeData()._week.doy).year;return null==a?b:this.add(a-b,"y")},isoWeekYear:function(a){var b=jb(this,1,4).year;return null==a?b:this.add(a-b,"y")},week:function(a){var b=this.localeData().week(this);return null==a?b:this.add(7*(a-b),"d")},isoWeek:function(a){var b=jb(this,1,4).week;return null==a?b:this.add(7*(a-b),"d")},weekday:function(a){var b=(this.day()+7-this.localeData()._week.dow)%7;return null==a?b:this.add(a-b,"d")},isoWeekday:function(a){return null==a?this.day()||7:this.day(this.day()%7?a:a-7)},isoWeeksInYear:function(){return E(this.year(),1,4)},weeksInYear:function(){var a=this.localeData()._week;return E(this.year(),a.dow,a.doy)},get:function(a){return a=z(a),this[a]()},set:function(a,b){var c;if("object"==typeof a)for(c in a)this.set(c,a[c]);else a=z(a),"function"==typeof this[a]&&this[a](b);return this},locale:function(b){var c;return b===a?this._locale._abbr:(c=vb.localeData(b),null!=c&&(this._locale=c),this)},lang:f("moment().lang() is deprecated. Instead, use moment().localeData() to get the language configuration. Use moment().locale() to change languages.",function(b){return b===a?this.localeData():this.locale(b)}),localeData:function(){return this._locale},_dateUtcOffset:function(){return 15*-Math.round(this._d.getTimezoneOffset()/15)}}),vb.fn.millisecond=vb.fn.milliseconds=qb("Milliseconds",!1),vb.fn.second=vb.fn.seconds=qb("Seconds",!1),vb.fn.minute=vb.fn.minutes=qb("Minutes",!1),vb.fn.hour=vb.fn.hours=qb("Hours",!0),vb.fn.date=qb("Date",!0),vb.fn.dates=f("dates accessor is deprecated. Use date instead.",qb("Date",!0)),vb.fn.year=qb("FullYear",!0),vb.fn.years=f("years accessor is deprecated. 
Use year instead.",qb("FullYear",!0)),vb.fn.days=vb.fn.day,vb.fn.months=vb.fn.month,vb.fn.weeks=vb.fn.week,vb.fn.isoWeeks=vb.fn.isoWeek,vb.fn.quarters=vb.fn.quarter,vb.fn.toJSON=vb.fn.toISOString,vb.fn.isUTC=vb.fn.isUtc,o(vb.duration.fn=n.prototype,{_bubble:function(){var a,b,c,d=this._milliseconds,e=this._days,f=this._months,g=this._data,h=0;g.milliseconds=d%1e3,a=q(d/1e3),g.seconds=a%60,b=q(a/60),g.minutes=b%60,c=q(b/60),g.hours=c%24,e+=q(c/24),h=q(rb(e)),e-=q(sb(h)),f+=q(e/30),e%=30,h+=q(f/12),f%=12,g.days=e,g.months=f,g.years=h},abs:function(){return this._milliseconds=Math.abs(this._milliseconds),this._days=Math.abs(this._days),this._months=Math.abs(this._months),this._data.milliseconds=Math.abs(this._data.milliseconds),this._data.seconds=Math.abs(this._data.seconds),this._data.minutes=Math.abs(this._data.minutes),this._data.hours=Math.abs(this._data.hours),this._data.months=Math.abs(this._data.months),this._data.years=Math.abs(this._data.years),this},weeks:function(){return q(this.days()/7)},valueOf:function(){return this._milliseconds+864e5*this._days+this._months%12*2592e6+31536e6*C(this._months/12) -},humanize:function(a){var b=ib(this,!a,this.localeData());return a&&(b=this.localeData().pastFuture(+this,b)),this.localeData().postformat(b)},add:function(a,b){var c=vb.duration(a,b);return this._milliseconds+=c._milliseconds,this._days+=c._days,this._months+=c._months,this._bubble(),this},subtract:function(a,b){var c=vb.duration(a,b);return this._milliseconds-=c._milliseconds,this._days-=c._days,this._months-=c._months,this._bubble(),this},get:function(a){return a=z(a),this[a.toLowerCase()+"s"]()},as:function(a){var b,c;if(a=z(a),"month"===a||"year"===a)return b=this._days+this._milliseconds/864e5,c=this._months+12*rb(b),"month"===a?c:c/12;switch(b=this._days+Math.round(sb(this._months/12)),a){case"week":return b/7+this._milliseconds/6048e5;case"day":return b+this._milliseconds/864e5;case"hour":return 24*b+this._milliseconds/36e5;case"minute":return 24*b*60+this._milliseconds/6e4;case"second":return 24*b*60*60+this._milliseconds/1e3;case"millisecond":return Math.floor(24*b*60*60*1e3)+this._milliseconds;default:throw new Error("Unknown unit "+a)}},lang:vb.fn.lang,locale:vb.fn.locale,toIsoString:f("toIsoString() is deprecated. 
Please use toISOString() instead (notice the capitals)",function(){return this.toISOString()}),toISOString:function(){var a=Math.abs(this.years()),b=Math.abs(this.months()),c=Math.abs(this.days()),d=Math.abs(this.hours()),e=Math.abs(this.minutes()),f=Math.abs(this.seconds()+this.milliseconds()/1e3);return this.asSeconds()?(this.asSeconds()<0?"-":"")+"P"+(a?a+"Y":"")+(b?b+"M":"")+(c?c+"D":"")+(d||e||f?"T":"")+(d?d+"H":"")+(e?e+"M":"")+(f?f+"S":""):"P0D"},localeData:function(){return this._locale},toJSON:function(){return this.toISOString()}}),vb.duration.fn.toString=vb.duration.fn.toISOString;for(xb in kc)c(kc,xb)&&tb(xb.toLowerCase());vb.duration.fn.asMilliseconds=function(){return this.as("ms")},vb.duration.fn.asSeconds=function(){return this.as("s")},vb.duration.fn.asMinutes=function(){return this.as("m")},vb.duration.fn.asHours=function(){return this.as("h")},vb.duration.fn.asDays=function(){return this.as("d")},vb.duration.fn.asWeeks=function(){return this.as("weeks")},vb.duration.fn.asMonths=function(){return this.as("M")},vb.duration.fn.asYears=function(){return this.as("y")},vb.locale("en",{ordinalParse:/\d{1,2}(th|st|nd|rd)/,ordinal:function(a){var b=a%10,c=1===C(a%100/10)?"th":1===b?"st":2===b?"nd":3===b?"rd":"th";return a+c}}),Lb?module.exports=vb:"function"==typeof define&&define.amd?(define(function(a,b,c){return c.config&&c.config()&&c.config().noGlobal===!0&&(zb.moment=wb),vb}),ub(!0)):ub()}).call(this); \ No newline at end of file +},humanize:function(a){var b=ib(this,!a,this.localeData());return a&&(b=this.localeData().pastFuture(+this,b)),this.localeData().postformat(b)},add:function(a,b){var c=vb.duration(a,b);return this._milliseconds+=c._milliseconds,this._days+=c._days,this._months+=c._months,this._bubble(),this},subtract:function(a,b){var c=vb.duration(a,b);return this._milliseconds-=c._milliseconds,this._days-=c._days,this._months-=c._months,this._bubble(),this},get:function(a){return a=z(a),this[a.toLowerCase()+"s"]()},as:function(a){var b,c;if(a=z(a),"month"===a||"year"===a)return b=this._days+this._milliseconds/864e5,c=this._months+12*rb(b),"month"===a?c:c/12;switch(b=this._days+Math.round(sb(this._months/12)),a){case"week":return b/7+this._milliseconds/6048e5;case"day":return b+this._milliseconds/864e5;case"hour":return 24*b+this._milliseconds/36e5;case"minute":return 24*b*60+this._milliseconds/6e4;case"second":return 24*b*60*60+this._milliseconds/1e3;case"millisecond":return Math.floor(24*b*60*60*1e3)+this._milliseconds;default:throw new Error("Unknown unit "+a)}},lang:vb.fn.lang,locale:vb.fn.locale,toIsoString:f("toIsoString() is deprecated. 
Please use toISOString() instead (notice the capitals)",function(){return this.toISOString()}),toISOString:function(){var a=Math.abs(this.years()),b=Math.abs(this.months()),c=Math.abs(this.days()),d=Math.abs(this.hours()),e=Math.abs(this.minutes()),f=Math.abs(this.seconds()+this.milliseconds()/1e3);return this.asSeconds()?(this.asSeconds()<0?"-":"")+"P"+(a?a+"Y":"")+(b?b+"M":"")+(c?c+"D":"")+(d||e||f?"T":"")+(d?d+"H":"")+(e?e+"M":"")+(f?f+"S":""):"P0D"},localeData:function(){return this._locale},toJSON:function(){return this.toISOString()}}),vb.duration.fn.toString=vb.duration.fn.toISOString;for(xb in kc)c(kc,xb)&&tb(xb.toLowerCase());vb.duration.fn.asMilliseconds=function(){return this.as("ms")},vb.duration.fn.asSeconds=function(){return this.as("s")},vb.duration.fn.asMinutes=function(){return this.as("m")},vb.duration.fn.asHours=function(){return this.as("h")},vb.duration.fn.asDays=function(){return this.as("d")},vb.duration.fn.asWeeks=function(){return this.as("weeks")},vb.duration.fn.asMonths=function(){return this.as("M")},vb.duration.fn.asYears=function(){return this.as("y")},vb.locale("en",{ordinalParse:/\d{1,2}(th|st|nd|rd)/,ordinal:function(a){var b=a%10,c=1===C(a%100/10)?"th":1===b?"st":2===b?"nd":3===b?"rd":"th";return a+c}}),Lb?module.exports=vb:"function"==typeof define&&define.amd?(define(function(a,b,c){return c.config&&c.config()&&c.config().noGlobal===!0&&(zb.moment=wb),vb}),ub(!0)):ub()}).call(this); diff --git a/ui/login.html b/ui/login.html index c2f66869..fe9167aa 100644 --- a/ui/login.html +++ b/ui/login.html @@ -60,7 +60,7 @@ - + @@ -82,7 +82,7 @@ })(jQuery); - \ No newline at end of file + diff --git a/ui/organisations.html b/ui/organisations.html index 22813eec..a8c51d73 100644 --- a/ui/organisations.html +++ b/ui/organisations.html @@ -6,7 +6,7 @@ - + @@ -81,8 +81,8 @@ - - + + @@ -91,4 +91,4 @@ - \ No newline at end of file + diff --git a/ui/relationships.html b/ui/relationships.html index cd81b1a9..a01f8baf 100644 --- a/ui/relationships.html +++ b/ui/relationships.html @@ -79,8 +79,8 @@ - - + + @@ -89,4 +89,4 @@ - \ No newline at end of file + From 9376200a8ea6dc6f2c369f73b9c38f5f9fa0cffb Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Sat, 17 Oct 2020 16:48:17 +0200 Subject: [PATCH 04/48] Add Github Action and license badge (#60) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 77d5116e..3d9f3728 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@

# Apache Kibble +![CI](https://github.com/apache/kibble/workflows/CI/badge.svg) +[![License](http://img.shields.io/:license-Apache%202-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0.txt) Apache Kibble is a tool to collect, aggregate and visualize data about any software project that uses commonly known tools. It consists of two components: From aaae65de01e48d2c9a715e5b808192a0e2221f6e Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Thu, 22 Oct 2020 17:31:28 +0200 Subject: [PATCH 05/48] Add and unify license headers (#61) Closes: #59 --- .pre-commit-config.yaml | 37 +++++++++++++++++++ api/handler.py | 30 ++++++++------- api/pages/__init__.py | 29 ++++++++------- api/pages/account.py | 32 ++++++++-------- api/pages/bio/bio.py | 29 ++++++++------- api/pages/bio/newtimers.py | 29 ++++++++------- api/pages/bio/trends.py | 31 ++++++++-------- api/pages/ci/queue.py | 29 ++++++++------- api/pages/ci/status.py | 29 ++++++++------- api/pages/ci/top-buildcount.py | 29 ++++++++------- api/pages/ci/top-buildtime.py | 29 ++++++++------- api/pages/code/changes.py | 29 ++++++++------- api/pages/code/commits.py | 29 ++++++++------- api/pages/code/committers.py | 30 ++++++++------- api/pages/code/evolution.py | 30 ++++++++------- api/pages/code/pony-timeseries.py | 30 ++++++++------- api/pages/code/pony.py | 30 ++++++++------- api/pages/code/punchcard.py | 30 ++++++++------- api/pages/code/relationships.py | 30 ++++++++------- api/pages/code/retention.py | 30 ++++++++------- api/pages/code/sloc.py | 30 ++++++++------- api/pages/code/top-commits.py | 30 ++++++++------- api/pages/code/top-sloc.py | 30 ++++++++------- api/pages/code/trends.py | 30 ++++++++------- api/pages/filters.py | 28 +++++++------- api/pages/forum/actors.py | 30 ++++++++------- api/pages/forum/creators.py | 30 ++++++++------- api/pages/forum/issues.py | 30 ++++++++------- api/pages/forum/responders.py | 30 ++++++++------- api/pages/forum/top-count.py | 30 ++++++++------- api/pages/forum/top.py | 30 ++++++++------- api/pages/forum/trends.py | 30 ++++++++------- api/pages/issue/actors.py | 30 ++++++++------- api/pages/issue/age.py | 30 ++++++++------- api/pages/issue/closers.py | 30 ++++++++------- api/pages/issue/issues.py | 30 ++++++++------- api/pages/issue/openers.py | 30 ++++++++------- api/pages/issue/pony-timeseries.py | 30 ++++++++------- api/pages/issue/relationships.py | 30 ++++++++------- api/pages/issue/retention.py | 30 ++++++++------- api/pages/issue/top-count.py | 30 ++++++++------- api/pages/issue/top.py | 30 ++++++++------- api/pages/issue/trends.py | 30 ++++++++------- api/pages/mail/keyphrases.py | 30 ++++++++------- api/pages/mail/map.py | 30 ++++++++------- api/pages/mail/mood-timeseries.py | 30 ++++++++------- api/pages/mail/mood.py | 30 ++++++++------- api/pages/mail/pony-timeseries.py | 30 ++++++++------- api/pages/mail/relationships.py | 30 ++++++++------- api/pages/mail/retention.py | 30 ++++++++------- api/pages/mail/timeseries-single.py | 30 ++++++++------- api/pages/mail/timeseries.py | 30 ++++++++------- api/pages/mail/top-authors.py | 30 ++++++++------- api/pages/mail/top-topics.py | 30 ++++++++------- api/pages/mail/trends.py | 30 ++++++++------- api/pages/org/contributors.py | 30 ++++++++------- api/pages/org/list.py | 30 ++++++++------- api/pages/org/members.py | 30 ++++++++------- api/pages/org/sourcetypes.py | 30 ++++++++------- api/pages/org/trends.py | 30 ++++++++------- api/pages/session.py | 30 ++++++++------- api/pages/sources.py | 30 ++++++++------- api/pages/verify.py | 29 
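The badge added above expects a GitHub Actions workflow whose name: is CI; the workflow file itself is not included in this patch. Purely as an illustration of the kind of workflow such a badge assumes (the path, job name, Python version and steps below are guesses, not the repository's actual configuration):

    # .github/workflows/ci.yml -- hypothetical sketch, not part of this patch
    name: CI

    on: [push, pull_request]

    jobs:
      static-checks:
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v2
          - uses: actions/setup-python@v2
            with:
              python-version: "3.8"
          # Run the repository's pre-commit hooks (see PATCH 05 below),
          # so CI enforces exactly what contributors run locally.
          - run: pip install pre-commit
          - run: pre-commit run --all-files

Driving CI through pre-commit, as sketched here, would keep the checks behind the badge identical to the hooks the next patch configures.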
From aaae65de01e48d2c9a715e5b808192a0e2221f6e Mon Sep 17 00:00:00 2001
From: Tomek Urbaszek
Date: Thu, 22 Oct 2020 17:31:28 +0200
Subject: [PATCH 05/48] Add and unify license headers (#61)

Closes: #59
---
 .pre-commit-config.yaml | 37 +++++++++++++++++++
 api/handler.py | 30 ++++++++-------
 api/pages/__init__.py | 29 ++++++++-------
 api/pages/account.py | 32 ++++++++--------
 api/pages/bio/bio.py | 29 ++++++++-------
 api/pages/bio/newtimers.py | 29 ++++++++-------
 api/pages/bio/trends.py | 31 ++++++++--------
 api/pages/ci/queue.py | 29 ++++++++-------
 api/pages/ci/status.py | 29 ++++++++-------
 api/pages/ci/top-buildcount.py | 29 ++++++++-------
 api/pages/ci/top-buildtime.py | 29 ++++++++-------
 api/pages/code/changes.py | 29 ++++++++-------
 api/pages/code/commits.py | 29 ++++++++-------
 api/pages/code/committers.py | 30 ++++++++-------
 api/pages/code/evolution.py | 30 ++++++++-------
 api/pages/code/pony-timeseries.py | 30 ++++++++-------
 api/pages/code/pony.py | 30 ++++++++-------
 api/pages/code/punchcard.py | 30 ++++++++-------
 api/pages/code/relationships.py | 30 ++++++++-------
 api/pages/code/retention.py | 30 ++++++++-------
 api/pages/code/sloc.py | 30 ++++++++-------
 api/pages/code/top-commits.py | 30 ++++++++-------
 api/pages/code/top-sloc.py | 30 ++++++++-------
 api/pages/code/trends.py | 30 ++++++++-------
 api/pages/filters.py | 28 +++++++-------
 api/pages/forum/actors.py | 30 ++++++++-------
 api/pages/forum/creators.py | 30 ++++++++-------
 api/pages/forum/issues.py | 30 ++++++++-------
 api/pages/forum/responders.py | 30 ++++++++-------
 api/pages/forum/top-count.py | 30 ++++++++-------
 api/pages/forum/top.py | 30 ++++++++-------
 api/pages/forum/trends.py | 30 ++++++++-------
 api/pages/issue/actors.py | 30 ++++++++-------
 api/pages/issue/age.py | 30 ++++++++-------
 api/pages/issue/closers.py | 30 ++++++++-------
 api/pages/issue/issues.py | 30 ++++++++-------
 api/pages/issue/openers.py | 30 ++++++++-------
 api/pages/issue/pony-timeseries.py | 30 ++++++++-------
 api/pages/issue/relationships.py | 30 ++++++++-------
 api/pages/issue/retention.py | 30 ++++++++-------
 api/pages/issue/top-count.py | 30 ++++++++-------
 api/pages/issue/top.py | 30 ++++++++-------
 api/pages/issue/trends.py | 30 ++++++++-------
 api/pages/mail/keyphrases.py | 30 ++++++++-------
 api/pages/mail/map.py | 30 ++++++++-------
 api/pages/mail/mood-timeseries.py | 30 ++++++++-------
 api/pages/mail/mood.py | 30 ++++++++-------
 api/pages/mail/pony-timeseries.py | 30 ++++++++-------
 api/pages/mail/relationships.py | 30 ++++++++-------
 api/pages/mail/retention.py | 30 ++++++++-------
 api/pages/mail/timeseries-single.py | 30 ++++++++-------
 api/pages/mail/timeseries.py | 30 ++++++++-------
 api/pages/mail/top-authors.py | 30 ++++++++-------
 api/pages/mail/top-topics.py | 30 ++++++++-------
 api/pages/mail/trends.py | 30 ++++++++-------
 api/pages/org/contributors.py | 30 ++++++++-------
 api/pages/org/list.py | 30 ++++++++-------
 api/pages/org/members.py | 30 ++++++++-------
 api/pages/org/sourcetypes.py | 30 ++++++++-------
 api/pages/org/trends.py | 30 ++++++++-------
 api/pages/session.py | 30 ++++++++-------
 api/pages/sources.py | 30 ++++++++-------
 api/pages/verify.py | 29 ++++++++-------
 api/pages/views.py | 30 ++++++++-------
 api/pages/widgets.py | 29 ++++++++-------
 api/plugins/database.py | 28 +++++++-------
 api/plugins/openapi.py | 28 +++++++-------
 api/plugins/session.py | 28 +++++++-------
 api/yaml/openapi/combine.py | 36 ++++++++----------
 .../_static/images/kibble-architecture.puml | 19 ++++++++++
 docs/source/conf.py | 18 ++++++++-
 docs/source/index.rst | 17 +++++++++
 docs/source/managing.rst | 17 +++++++++
 docs/source/setup.rst | 17 +++++++++
 docs/source/usecases.rst | 17 +++++++++
 license-templates/LICENSE.rst | 16 ++++++++
 license-templates/LICENSE.txt | 16 ++++++++
 setup/makeaccount.py | 27 +++++++-------
 setup/setup.py | 29 ++++++++-------
 ui/css/c3.css | 19 ++++++++++
 ui/css/chosen.css | 19 ++++++++++
 ui/css/daterangepicker.css | 19 ++++++++++
 ui/css/kibble.min.css | 19 ++++++++++
 ui/css/main.css | 19 ++++++++++
 ui/css/theme.css | 19 ++++++++++
 ui/js/coffee/combine.sh | 17 +++++++++
 86 files changed, 1402 insertions(+), 989 deletions(-)
 create mode 100644 license-templates/LICENSE.rst
 create mode 100644 license-templates/LICENSE.txt

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3dd6d750..0bb7740f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -31,3 +31,40 @@ repos:
         exclude: ^ui/vendors/.*$
       - id: trailing-whitespace
         exclude: ^ui/vendors/.*$
+      - id: fix-encoding-pragma
+        args:
+          - --remove
+  - repo: https://github.com/Lucas-C/pre-commit-hooks
+    rev: v1.1.9
+    hooks:
+      - id: insert-license
+        name: Add license for all other files
+        exclude: ^\.github/.*$
+        args:
+          - --comment-style
+          - "|#|"
+          - --license-filepath
+          - license-templates/LICENSE.txt
+          - --fuzzy-match-generates-todo
+        files: >
+          \.cfg$|^Dockerfile.*$|\.sh$|\.bash$|\.py$|\.yml$|\.yaml$
+      - id: insert-license
+        name: Add license for all rst files
+        exclude: ^\.github/.*$
+        args:
+          - --comment-style
+          - "||"
+          - --license-filepath
+          - license-templates/LICENSE.rst
+          - --fuzzy-match-generates-todo
+        files: \.rst$
+      - id: insert-license
+        name: Add license for all md and html files
+        files: \.md$|\.html$
+        exclude: ^\.github/.*$|
+        args:
+          - --comment-style
+          - "<!--||-->"
+          - --license-filepath
+          - license-templates/LICENSE.txt
+          - --fuzzy-match-generates-todo
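The insert-license entries above configure hooks from the Lucas-C/pre-commit-hooks project: each prepends the referenced license template to matching files that lack it, wrapped in the given comment style, and --fuzzy-match-generates-todo calls out files whose existing header only loosely matches the template. The fix-encoding-pragma hook with --remove strips the old "# -*- coding: utf-8 -*-" lines, which is why those lines appear as deletions throughout the hunks that follow. A typical local invocation, using standard pre-commit commands rather than anything introduced by this patch, looks like:

    pip install pre-commit                      # one-off: install the framework
    pre-commit install                          # register the git pre-commit hook
    pre-commit run --all-files                  # apply every configured hook to the tree
    pre-commit run insert-license --all-files   # or run only the license hooks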
You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. """ This is the main WSGI handler file for Apache Kibble. diff --git a/api/pages/__init__.py b/api/pages/__init__.py index 1f8ef384..af0f5823 100644 --- a/api/pages/__init__.py +++ b/api/pages/__init__.py @@ -1,19 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + """ Kibble API scripts library: diff --git a/api/pages/account.py b/api/pages/account.py index c3d700bd..8821eee0 100644 --- a/api/pages/account.py +++ b/api/pages/account.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/account ######################################################################## @@ -89,9 +90,6 @@ ######################################################################## - - - """ This is the user account handler for Kibble. adds, removes and edits accounts. diff --git a/api/pages/bio/bio.py b/api/pages/bio/bio.py index c3aa5338..c2c91cbb 100644 --- a/api/pages/bio/bio.py +++ b/api/pages/bio/bio.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/bio/bio ######################################################################## diff --git a/api/pages/bio/newtimers.py b/api/pages/bio/newtimers.py index 8dd4dc20..e85a08b7 100644 --- a/api/pages/bio/newtimers.py +++ b/api/pages/bio/newtimers.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/bio/newtimers ######################################################################## diff --git a/api/pages/bio/trends.py b/api/pages/bio/trends.py index 7e5e92b7..18b84b74 100644 --- a/api/pages/bio/trends.py +++ b/api/pages/bio/trends.py @@ -1,20 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-########################################################################
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+########################################################################
 # OPENAPI-URI: /api/bio/trends
 ########################################################################
 # get:
diff --git a/api/pages/ci/queue.py b/api/pages/ci/queue.py
index bba2f65d..b64c6ccb 100644
--- a/api/pages/ci/queue.py
+++ b/api/pages/ci/queue.py
@@ -1,19 +1,20 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
 #
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
 ########################################################################
 # OPENAPI-URI: /api/ci/queue
 ########################################################################
diff --git a/api/pages/ci/status.py b/api/pages/ci/status.py
index 2891a791..1629bc6d 100644
--- a/api/pages/ci/status.py
+++ b/api/pages/ci/status.py
@@ -1,19 +1,20 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/ci/status ######################################################################## diff --git a/api/pages/ci/top-buildcount.py b/api/pages/ci/top-buildcount.py index d688346e..96e12cb4 100644 --- a/api/pages/ci/top-buildcount.py +++ b/api/pages/ci/top-buildcount.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/ci/top-buildcount ######################################################################## diff --git a/api/pages/ci/top-buildtime.py b/api/pages/ci/top-buildtime.py index 46fafca9..a9481ee4 100644 --- a/api/pages/ci/top-buildtime.py +++ b/api/pages/ci/top-buildtime.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/ci/top-buildtime ######################################################################## diff --git a/api/pages/code/changes.py b/api/pages/code/changes.py index 72a90cf1..6365d537 100644 --- a/api/pages/code/changes.py +++ b/api/pages/code/changes.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/code/changes ######################################################################## diff --git a/api/pages/code/commits.py b/api/pages/code/commits.py index 54bb4763..2b450387 100644 --- a/api/pages/code/commits.py +++ b/api/pages/code/commits.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/code/commits ######################################################################## diff --git a/api/pages/code/committers.py b/api/pages/code/committers.py index a1370984..b48ae256 100644 --- a/api/pages/code/committers.py +++ b/api/pages/code/committers.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/code/committers ######################################################################## diff --git a/api/pages/code/evolution.py b/api/pages/code/evolution.py index 8bc159bc..dd33c123 100644 --- a/api/pages/code/evolution.py +++ b/api/pages/code/evolution.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/code/evolution ######################################################################## diff --git a/api/pages/code/pony-timeseries.py b/api/pages/code/pony-timeseries.py index 5427b440..2a27c009 100644 --- a/api/pages/code/pony-timeseries.py +++ b/api/pages/code/pony-timeseries.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/code/pony-timeseries ######################################################################## diff --git a/api/pages/code/pony.py b/api/pages/code/pony.py index ee64c8b5..895a17af 100644 --- a/api/pages/code/pony.py +++ b/api/pages/code/pony.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/code/pony ######################################################################## diff --git a/api/pages/code/punchcard.py b/api/pages/code/punchcard.py index a8bf26bd..babbaeba 100644 --- a/api/pages/code/punchcard.py +++ b/api/pages/code/punchcard.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/code/punchcard ######################################################################## diff --git a/api/pages/code/relationships.py b/api/pages/code/relationships.py index 1786b7e6..f722b7ce 100644 --- a/api/pages/code/relationships.py +++ b/api/pages/code/relationships.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/code/relationships ######################################################################## diff --git a/api/pages/code/retention.py b/api/pages/code/retention.py index 70a7bc7b..31debdc0 100644 --- a/api/pages/code/retention.py +++ b/api/pages/code/retention.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/code/retention ######################################################################## diff --git a/api/pages/code/sloc.py b/api/pages/code/sloc.py index a6d7fc22..9709fc3e 100644 --- a/api/pages/code/sloc.py +++ b/api/pages/code/sloc.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/code/sloc ######################################################################## diff --git a/api/pages/code/top-commits.py b/api/pages/code/top-commits.py index d811082b..3c782e97 100644 --- a/api/pages/code/top-commits.py +++ b/api/pages/code/top-commits.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/code/top-commits ######################################################################## diff --git a/api/pages/code/top-sloc.py b/api/pages/code/top-sloc.py index 4cdf2762..6beafaac 100644 --- a/api/pages/code/top-sloc.py +++ b/api/pages/code/top-sloc.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/code/top-sloc ######################################################################## diff --git a/api/pages/code/trends.py b/api/pages/code/trends.py index d0cfc449..da1803ca 100644 --- a/api/pages/code/trends.py +++ b/api/pages/code/trends.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/code/trends ######################################################################## diff --git a/api/pages/filters.py b/api/pages/filters.py index a97112cb..cedf96e6 100644 --- a/api/pages/filters.py +++ b/api/pages/filters.py @@ -1,19 +1,19 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. """ This is the source list handler for Kibble diff --git a/api/pages/forum/actors.py b/api/pages/forum/actors.py index 40ad8ae2..ede9f6a4 100644 --- a/api/pages/forum/actors.py +++ b/api/pages/forum/actors.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/forum/actors ######################################################################## diff --git a/api/pages/forum/creators.py b/api/pages/forum/creators.py index 574e4577..f5f92703 100644 --- a/api/pages/forum/creators.py +++ b/api/pages/forum/creators.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/forum/creators ######################################################################## diff --git a/api/pages/forum/issues.py b/api/pages/forum/issues.py index fa27035b..b4f7fc0d 100644 --- a/api/pages/forum/issues.py +++ b/api/pages/forum/issues.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/forum/issues ######################################################################## diff --git a/api/pages/forum/responders.py b/api/pages/forum/responders.py index d379c1ef..0ffa5a3b 100644 --- a/api/pages/forum/responders.py +++ b/api/pages/forum/responders.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/forum/responders ######################################################################## diff --git a/api/pages/forum/top-count.py b/api/pages/forum/top-count.py index 585407fd..545567ac 100644 --- a/api/pages/forum/top-count.py +++ b/api/pages/forum/top-count.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/forum/top-count ######################################################################## diff --git a/api/pages/forum/top.py b/api/pages/forum/top.py index 3d184320..483aef17 100644 --- a/api/pages/forum/top.py +++ b/api/pages/forum/top.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/forum/top ######################################################################## diff --git a/api/pages/forum/trends.py b/api/pages/forum/trends.py index 208ccb0f..27efa264 100644 --- a/api/pages/forum/trends.py +++ b/api/pages/forum/trends.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/forum/trends ######################################################################## diff --git a/api/pages/issue/actors.py b/api/pages/issue/actors.py index b53e839a..edcbc541 100644 --- a/api/pages/issue/actors.py +++ b/api/pages/issue/actors.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/issue/actors ######################################################################## diff --git a/api/pages/issue/age.py b/api/pages/issue/age.py index b6a3d901..cafdaf49 100644 --- a/api/pages/issue/age.py +++ b/api/pages/issue/age.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/issue/age ######################################################################## diff --git a/api/pages/issue/closers.py b/api/pages/issue/closers.py index 53ada24a..90bd56a4 100644 --- a/api/pages/issue/closers.py +++ b/api/pages/issue/closers.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/issue/closers ######################################################################## diff --git a/api/pages/issue/issues.py b/api/pages/issue/issues.py index dac17211..b947cb1d 100644 --- a/api/pages/issue/issues.py +++ b/api/pages/issue/issues.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/issue/issues ######################################################################## diff --git a/api/pages/issue/openers.py b/api/pages/issue/openers.py index a2e081e5..85500742 100644 --- a/api/pages/issue/openers.py +++ b/api/pages/issue/openers.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/issue/openers ######################################################################## diff --git a/api/pages/issue/pony-timeseries.py b/api/pages/issue/pony-timeseries.py index f8ae3608..22068ddd 100644 --- a/api/pages/issue/pony-timeseries.py +++ b/api/pages/issue/pony-timeseries.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/issue/pony-timeseries ######################################################################## diff --git a/api/pages/issue/relationships.py b/api/pages/issue/relationships.py index ff0c9d6a..eb8fb767 100644 --- a/api/pages/issue/relationships.py +++ b/api/pages/issue/relationships.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/issue/relationships ######################################################################## diff --git a/api/pages/issue/retention.py b/api/pages/issue/retention.py index 3d745418..1cdecfd8 100644 --- a/api/pages/issue/retention.py +++ b/api/pages/issue/retention.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/issue/retention ######################################################################## diff --git a/api/pages/issue/top-count.py b/api/pages/issue/top-count.py index ebb121c8..4e3f5ae9 100644 --- a/api/pages/issue/top-count.py +++ b/api/pages/issue/top-count.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/issue/top-count ######################################################################## diff --git a/api/pages/issue/top.py b/api/pages/issue/top.py index 42b4d38e..87a05f1b 100644 --- a/api/pages/issue/top.py +++ b/api/pages/issue/top.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/issue/top ######################################################################## diff --git a/api/pages/issue/trends.py b/api/pages/issue/trends.py index c6b45152..61c528ba 100644 --- a/api/pages/issue/trends.py +++ b/api/pages/issue/trends.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/issue/trends ######################################################################## diff --git a/api/pages/mail/keyphrases.py b/api/pages/mail/keyphrases.py index 4f8936e1..ed03282b 100644 --- a/api/pages/mail/keyphrases.py +++ b/api/pages/mail/keyphrases.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/mail/keyphrases ######################################################################## diff --git a/api/pages/mail/map.py b/api/pages/mail/map.py index b0f8398c..14f7170f 100644 --- a/api/pages/mail/map.py +++ b/api/pages/mail/map.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/mail/map ######################################################################## diff --git a/api/pages/mail/mood-timeseries.py b/api/pages/mail/mood-timeseries.py index 21f26d03..60cc33e3 100644 --- a/api/pages/mail/mood-timeseries.py +++ b/api/pages/mail/mood-timeseries.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/mail/mood-timeseries ######################################################################## diff --git a/api/pages/mail/mood.py b/api/pages/mail/mood.py index 38dd57fc..ae51fb19 100644 --- a/api/pages/mail/mood.py +++ b/api/pages/mail/mood.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/mail/mood ######################################################################## diff --git a/api/pages/mail/pony-timeseries.py b/api/pages/mail/pony-timeseries.py index 37d160fb..033d7997 100644 --- a/api/pages/mail/pony-timeseries.py +++ b/api/pages/mail/pony-timeseries.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/mail/pony-timeseries ######################################################################## diff --git a/api/pages/mail/relationships.py b/api/pages/mail/relationships.py index c10b6343..1633a25c 100644 --- a/api/pages/mail/relationships.py +++ b/api/pages/mail/relationships.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/mail/relationships ######################################################################## diff --git a/api/pages/mail/retention.py b/api/pages/mail/retention.py index bc93b9ce..aa4f03e5 100644 --- a/api/pages/mail/retention.py +++ b/api/pages/mail/retention.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/mail/retention ######################################################################## diff --git a/api/pages/mail/timeseries-single.py b/api/pages/mail/timeseries-single.py index 8b8b231c..82580aa9 100644 --- a/api/pages/mail/timeseries-single.py +++ b/api/pages/mail/timeseries-single.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/mail/timeseries-single ######################################################################## diff --git a/api/pages/mail/timeseries.py b/api/pages/mail/timeseries.py index ab4e12a1..5072de4d 100644 --- a/api/pages/mail/timeseries.py +++ b/api/pages/mail/timeseries.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/mail/timeseries ######################################################################## diff --git a/api/pages/mail/top-authors.py b/api/pages/mail/top-authors.py index c2f2cb21..4f94e0f6 100644 --- a/api/pages/mail/top-authors.py +++ b/api/pages/mail/top-authors.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/mail/top-authors ######################################################################## diff --git a/api/pages/mail/top-topics.py b/api/pages/mail/top-topics.py index c9af57cc..90c5a805 100644 --- a/api/pages/mail/top-topics.py +++ b/api/pages/mail/top-topics.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/mail/top-topics ######################################################################## diff --git a/api/pages/mail/trends.py b/api/pages/mail/trends.py index baa1f1d4..91dcc3be 100644 --- a/api/pages/mail/trends.py +++ b/api/pages/mail/trends.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/mail/trends ######################################################################## diff --git a/api/pages/org/contributors.py b/api/pages/org/contributors.py index 3d243622..aec3056d 100644 --- a/api/pages/org/contributors.py +++ b/api/pages/org/contributors.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/org/contributors ######################################################################## diff --git a/api/pages/org/list.py b/api/pages/org/list.py index bf28f4d6..d21ef01b 100644 --- a/api/pages/org/list.py +++ b/api/pages/org/list.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/org/list ######################################################################## diff --git a/api/pages/org/members.py b/api/pages/org/members.py index b749b2fd..71e4be38 100644 --- a/api/pages/org/members.py +++ b/api/pages/org/members.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/org/members ######################################################################## diff --git a/api/pages/org/sourcetypes.py b/api/pages/org/sourcetypes.py index 6fa84025..8c41b007 100644 --- a/api/pages/org/sourcetypes.py +++ b/api/pages/org/sourcetypes.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/org/sourcetypes ######################################################################## diff --git a/api/pages/org/trends.py b/api/pages/org/trends.py index 4890c63f..db2c0586 100644 --- a/api/pages/org/trends.py +++ b/api/pages/org/trends.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/org/trends ######################################################################## diff --git a/api/pages/session.py b/api/pages/session.py index d09daabe..eebd9136 100644 --- a/api/pages/session.py +++ b/api/pages/session.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/session ######################################################################## diff --git a/api/pages/sources.py b/api/pages/sources.py index a15a55b0..86626847 100644 --- a/api/pages/sources.py +++ b/api/pages/sources.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/sources ######################################################################## diff --git a/api/pages/verify.py b/api/pages/verify.py index 8ef6f4f1..3ff61f7c 100644 --- a/api/pages/verify.py +++ b/api/pages/verify.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/verify/{email}/{vcode} ######################################################################## diff --git a/api/pages/views.py b/api/pages/views.py index bc619f21..2fd1a501 100644 --- a/api/pages/views.py +++ b/api/pages/views.py @@ -1,19 +1,21 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ######################################################################## # OPENAPI-URI: /api/views ######################################################################## diff --git a/api/pages/widgets.py b/api/pages/widgets.py index df5d4389..db3f5fdf 100644 --- a/api/pages/widgets.py +++ b/api/pages/widgets.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ ######################################################################## # OPENAPI-URI: /api/widgets/{pageid} ######################################################################## diff --git a/api/plugins/database.py b/api/plugins/database.py index 395808d4..913f41dc 100644 --- a/api/plugins/database.py +++ b/api/plugins/database.py @@ -1,19 +1,19 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. """ This is the ES library for Apache Kibble. diff --git a/api/plugins/openapi.py b/api/plugins/openapi.py index f2a7e3f0..044ff882 100644 --- a/api/plugins/openapi.py +++ b/api/plugins/openapi.py @@ -1,19 +1,19 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. """ This is the OpenAPI validator library. diff --git a/api/plugins/session.py b/api/plugins/session.py index 7d4522bf..56fb87c4 100644 --- a/api/plugins/session.py +++ b/api/plugins/session.py @@ -1,19 +1,19 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. """ This is the session library for Apache Kibble. diff --git a/api/yaml/openapi/combine.py b/api/yaml/openapi/combine.py index 7f5bc545..689a0f2f 100644 --- a/api/yaml/openapi/combine.py +++ b/api/yaml/openapi/combine.py @@ -1,28 +1,25 @@ -#!/usr/bin/env python3 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. import yaml import os import sys import re -license = """#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" - baseyaml = """ # THIS IS PULLED FROM SCRIPTS AND AUTOGENERATED! # Please use openapi/combine.py to regenerate! @@ -55,7 +52,6 @@ def deconstruct(): odefs = yaml.dump(defs, default_flow_style=False) odefs = "\n".join(["# %s" % line for line in odefs.split("\n")]) with open(ypath, "w") as f: - f.write(license) f.write("########################################################################\n") f.write("# OPENAPI-URI: %s\n" % endpoint) f.write("########################################################################\n") diff --git a/docs/source/_static/images/kibble-architecture.puml b/docs/source/_static/images/kibble-architecture.puml index 4a924faa..7b21b209 100644 --- a/docs/source/_static/images/kibble-architecture.puml +++ b/docs/source/_static/images/kibble-architecture.puml @@ -1,3 +1,22 @@ +/*! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + @startuml actor user database elasticsearch diff --git a/docs/source/conf.py b/docs/source/conf.py index 54491833..9f49f275 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,4 +1,20 @@ -# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + # # Apache Kibble documentation build configuration file, created by # sphinx-quickstart on Thu Jan 11 06:05:51 2018. diff --git a/docs/source/index.rst b/docs/source/index.rst index 11c8879e..3d39a2f4 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,3 +1,20 @@ + .. 
Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + .. http://www.apache.org/licenses/LICENSE-2.0 + + .. Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + .. Apache Kibble documentation master file, created by sphinx-quickstart on Thu Jan 11 06:05:51 2018. You can adapt this file completely to your liking, but it should at least diff --git a/docs/source/managing.rst b/docs/source/managing.rst index 65c39510..2020808f 100644 --- a/docs/source/managing.rst +++ b/docs/source/managing.rst @@ -1,3 +1,20 @@ + .. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + .. http://www.apache.org/licenses/LICENSE-2.0 + + .. Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + Managing Apache Kibble ====================== diff --git a/docs/source/setup.rst b/docs/source/setup.rst index 21a25a66..868dc92a 100644 --- a/docs/source/setup.rst +++ b/docs/source/setup.rst @@ -1,3 +1,20 @@ + .. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + .. http://www.apache.org/licenses/LICENSE-2.0 + + .. Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + Setting up Apache Kibble ======================== diff --git a/docs/source/usecases.rst b/docs/source/usecases.rst index 629058f1..da65192e 100644 --- a/docs/source/usecases.rst +++ b/docs/source/usecases.rst @@ -1,3 +1,20 @@ + .. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + .. http://www.apache.org/licenses/LICENSE-2.0 + + .. Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + Use Cases ======================== diff --git a/license-templates/LICENSE.rst b/license-templates/LICENSE.rst new file mode 100644 index 00000000..adf897d1 --- /dev/null +++ b/license-templates/LICENSE.rst @@ -0,0 +1,16 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. diff --git a/license-templates/LICENSE.txt b/license-templates/LICENSE.txt new file mode 100644 index 00000000..60b675e3 --- /dev/null +++ b/license-templates/LICENSE.txt @@ -0,0 +1,16 @@ +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. diff --git a/setup/makeaccount.py b/setup/makeaccount.py index 866c8eeb..64015bb6 100644 --- a/setup/makeaccount.py +++ b/setup/makeaccount.py @@ -1,18 +1,19 @@ -#!/usr/bin/env python3 -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. import sys, os, os.path import elasticsearch diff --git a/setup/setup.py b/setup/setup.py index 92246754..19b20898 100644 --- a/setup/setup.py +++ b/setup/setup.py @@ -1,18 +1,21 @@ -#!/usr/bin/env python3 -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at + + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. import sys import os diff --git a/ui/css/c3.css b/ui/css/c3.css index fffcd25a..f3b43ab0 100644 --- a/ui/css/c3.css +++ b/ui/css/c3.css @@ -1,3 +1,22 @@ +/*! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + /*-- Chart --*/ .c3 svg { font: 10px sans-serif; diff --git a/ui/css/chosen.css b/ui/css/chosen.css index d4219b49..9feaa026 100644 --- a/ui/css/chosen.css +++ b/ui/css/chosen.css @@ -1,3 +1,22 @@ +/*! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + /*! Chosen, a Select Box Enhancer for jQuery and Prototype by Patrick Filler for Harvest, http://getharvest.com diff --git a/ui/css/daterangepicker.css b/ui/css/daterangepicker.css index 1d643674..c06ed521 100644 --- a/ui/css/daterangepicker.css +++ b/ui/css/daterangepicker.css @@ -1,3 +1,22 @@ +/*! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + .daterangepicker { position: absolute; color: inherit; diff --git a/ui/css/kibble.min.css b/ui/css/kibble.min.css index d7673f67..00b73b95 100644 --- a/ui/css/kibble.min.css +++ b/ui/css/kibble.min.css @@ -1,3 +1,22 @@ +/*! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + /* * Kibble CSS */ diff --git a/ui/css/main.css b/ui/css/main.css index eee1265e..05e32579 100644 --- a/ui/css/main.css +++ b/ui/css/main.css @@ -1,3 +1,22 @@ +/*! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + /** * bootstrap-admin-template - Free Admin Template Based On Twitter Bootstrap 3.x * @version 2.4.2 diff --git a/ui/css/theme.css b/ui/css/theme.css index 7c5e55d7..f0e318cd 100644 --- a/ui/css/theme.css +++ b/ui/css/theme.css @@ -1,3 +1,22 @@ +/*! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + /** * bootstrap-admin-template - Free Admin Template Based On Twitter Bootstrap 3.x * @version 2.4.2 diff --git a/ui/js/coffee/combine.sh b/ui/js/coffee/combine.sh index 4c3a9698..9505d661 100644 --- a/ui/js/coffee/combine.sh +++ b/ui/js/coffee/combine.sh @@ -1,2 +1,19 @@ #!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+
 coffee -b --join ../kibble.v1.js -c *.coffee

From 7092e8685982f701d26e9b737659647c6cb1c606 Mon Sep 17 00:00:00 2001
From: Tomek Urbaszek
Date: Fri, 23 Oct 2020 14:22:30 +0200
Subject: [PATCH 06/48] Add dev docker compose (#50)

---
 CONTRIBUTING.md         | 25 ++++++++++++++--
 Dockerfile.dev          | 31 ++++++++++++++++++++
 docker-compose-dev.yaml | 64 +++++++++++++++++++++++++++++++++++++++++
 nginx-dev.conf          | 34 ++++++++++++++++++++
 setup/requirements.txt  | 14 +++++----
 setup/setup.py          | 35 +++++++++++++---------
 6 files changed, 181 insertions(+), 22 deletions(-)
 create mode 100644 Dockerfile.dev
 create mode 100644 docker-compose-dev.yaml
 create mode 100644 nginx-dev.conf

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2c3351a7..b3ef0dd1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -15,8 +15,29 @@ We also have:

 ## Development installation

-This project requires Python in higher version than 3.4.
-More information will come soon!
+The easiest option to spin up a development environment is to use our development docker-compose.
+The development image has all Kibble sources mounted, so all your local code changes will be
+automatically reflected in the running app.
+
+First you need to configure the Elasticsearch node:
+```
+docker-compose -f docker-compose-dev.yaml up setup
+```
+Once you see the following message:
+```
+setup_1 | All done, Kibble should...work now :)
+```
+you can launch the Apache Kibble ui:
+```
+docker-compose -f docker-compose-dev.yaml up ui
+```
+The ui should be available under `http://0.0.0.0:8000` or `http://localhost:8000`. To log in you can use
+the dummy admin account `admin@kibble` and password `kibbleAdmin`.
+
+You can also start only the API server:
+```
+docker-compose -f docker-compose-dev.yaml up kibble
+```

 ## Code Quality

diff --git a/Dockerfile.dev b/Dockerfile.dev
new file mode 100644
index 00000000..ab76414d
--- /dev/null
+++ b/Dockerfile.dev
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM python:3.6
+
+USER root
+RUN apt-get update
+RUN apt-get install -y gcc unzip
+
+COPY ./api /kibble/api/
+COPY ./setup /kibble/setup/
+COPY ./ui /kibble/ui/
+
+RUN pip install --upgrade pip
+RUN pip install -r /kibble/setup/requirements.txt
+
+WORKDIR /kibble
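The CONTRIBUTING.md instructions above end with starting only the API server. A minimal smoke test for that setup, as a sketch only: it assumes the API service answers on port 8001 (as configured in docker-compose-dev.yaml below), and since the exact payload of the `/api/session` endpoint is not shown in this patch, it just prints whatever the server returns.

```python
import urllib.error
import urllib.request

def check_api(base="http://localhost:8001"):
    # /api/session is one of the endpoints registered under api/pages/.
    try:
        with urllib.request.urlopen(base + "/api/session") as resp:
            print(resp.status, resp.read().decode())
    except urllib.error.HTTPError as err:
        # Even an auth error (e.g. 403) proves the API server is answering.
        print(err.code, err.read().decode())

if __name__ == "__main__":
    check_api()
```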
diff --git a/docker-compose-dev.yaml b/docker-compose-dev.yaml
new file mode 100644
index 00000000..bb7f1d2a
--- /dev/null
+++ b/docker-compose-dev.yaml
@@ -0,0 +1,64 @@
+version: '3'
+
+services:
+  # Helper service to set up the Apache Kibble es node
+  setup:
+    image: &img apache/kibble
+    build:
+      context: .
+      dockerfile: Dockerfile.dev
+    command: bash -c "python setup/setup.py -e elasticsearch -a -k"
+    volumes:
+      - ./setup/:/kibble/setup/
+    depends_on:
+      - elasticsearch
+
+  # Apache Kibble API server
+  kibble:
+    image: *img
+    command: bash -c "cd api && gunicorn --reload -w 1 -b 0.0.0.0:8001 handler:application"
+    expose:
+      - 8001
+    ports:
+      - 8001:8001
+    volumes:
+      - ./api/:/kibble/api/
+      - ./setup/:/kibble/setup/
+      - ./ui/:/kibble/ui/
+    depends_on:
+      - elasticsearch
+
+  # Apache Kibble web ui server
+  ui:
+    image: nginx:latest
+    volumes:
+      - ./nginx-dev.conf:/etc/nginx/nginx.conf
+      - ./ui/:/kibble/ui/
+    ports:
+      - 8000:8000
+    depends_on:
+      - kibble
+
+  # Elasticsearch node required as a database for Apache Kibble
+  elasticsearch:
+    image: elasticsearch:7.9.2
+    ports:
+      - "9200:9200"
+      - "9300:9300"
+    environment:
+      node.name: es01
+      discovery.seed_hosts: es02
+      cluster.initial_master_nodes: es01
+      cluster.name: traefik-tutorial-cluster
+      bootstrap.memory_lock: "true"
+      ES_JAVA_OPTS: -Xms256m -Xmx256m
+    volumes:
+      - "kibble-es-data:/usr/share/elasticsearch/data"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+
+volumes:
+  # named volumes can be managed more easily using docker-compose
+  kibble-es-data:

diff --git a/nginx-dev.conf b/nginx-dev.conf
new file mode 100644
index 00000000..22270d31
--- /dev/null
+++ b/nginx-dev.conf
@@ -0,0 +1,34 @@
+events {}
+http {
+    server {
+        listen 8000;
+
+        server_name kibble;
+
+        access_log /var/log/nginx/kibble_access.log;
+        error_log /var/log/nginx/kibble_error.log;
+
+        proxy_set_header Host $http_host;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Host $host;
+        proxy_set_header X-Forwarded-Port $server_port;
+        proxy_set_header X-Forwarded-Proto $scheme;
+
+        root /kibble/ui;
+        index index.html;
+
+        location / {
+            try_files $uri $uri/ =404;
+        }
+
+        location ~ /css {
+            add_header Content-Type text/css;
+        }
+
+        # Reverse proxy to Apache Kibble API
+        location /api {
+            proxy_pass http://kibble:8001;
+            rewrite ^/api(.*)/$ $1 break;
+        }
+    }
+}
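The `/api` location in nginx-dev.conf above rewrites trailing-slash URLs before proxying. For illustration, the same substitution reproduced with Python's `re` module (the real rewriting happens inside nginx, not in Kibble code; the example paths are hypothetical):

```python
import re

# Mirrors: rewrite ^/api(.*)/$ $1 break;
for path in ("/api/widgets/1/", "/api/session"):
    print(path, "->", re.sub(r"^/api(.*)/$", r"\1", path))
# /api/widgets/1/ -> /widgets/1   (prefix and trailing slash stripped)
# /api/session    -> /api/session (no trailing slash, passed through unchanged)
```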
diff --git a/setup/requirements.txt b/setup/requirements.txt
index 24b3bca6..6a2d1f3a 100644
--- a/setup/requirements.txt
+++ b/setup/requirements.txt
@@ -1,6 +1,8 @@
-certifi
-pyyaml
-bcrypt
-elasticsearch
-pre-commit
-python-dateutil
+bcrypt==3.2.0
+certifi==2020.6.20
+elasticsearch==7.9.1
+gunicorn==20.0.4
+pre-commit==2.7.1
+python-dateutil==2.8.1
+PyYAML==5.3.1
+tenacity==6.2.0

diff --git a/setup/setup.py b/setup/setup.py
index 19b20898..689a2971 100644
--- a/setup/setup.py
+++ b/setup/setup.py
@@ -23,6 +23,7 @@
 import logging
 from getpass import getpass

+import tenacity
 import yaml
 import bcrypt
 import json
@@ -312,20 +313,26 @@ def main():
     admin_pass = get_user_input("Enter a password for the administrator account:", secure=True)

     # Create Elasticsearch index
-    try:
-        create_es_index(
-            hostname=args.hostname,
-            port=int(args.port),
-            dbname=args.dbname,
-            shards=int(args.shards),
-            replicas=int(args.replicas),
-            admin_name=admin_name,
-            admin_pass=admin_pass,
-            skiponexist=args.skiponexist,
-        )
-    except Exception as e:
-        print("Index creation failed: %s" % e)
-        sys.exit(1)
+    # Retry in case ES is not yet up
+    print(f"Elasticsearch: {args.hostname}:{args.port}")
+    for attempt in tenacity.Retrying(
+        retry=tenacity.retry_if_exception_type(exception_types=Exception),
+        wait=tenacity.wait_fixed(10),
+        stop=tenacity.stop_after_attempt(10),
+        reraise=True
+    ):
+        with attempt:
+            print("Trying to create ES index...")
+            create_es_index(
+                hostname=args.hostname,
+                port=int(args.port),
+                dbname=args.dbname,
+                shards=int(args.shards),
+                replicas=int(args.replicas),
+                admin_name=admin_name,
+                admin_pass=admin_pass,
+                skiponexist=args.skiponexist,
+            )
     print()

     # Create Kibble configuration file
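The setup.py hunk above retries index creation while Elasticsearch boots. A self-contained sketch of the same tenacity pattern, with `flaky()` as a hypothetical stand-in for `create_es_index` and the timings shortened for illustration:

```python
import tenacity

calls = {"n": 0}

def flaky():
    # Stand-in for create_es_index(): fails twice, then succeeds,
    # like ES refusing connections until it has finished booting.
    calls["n"] += 1
    if calls["n"] < 3:
        raise ConnectionError("not ready yet")
    print("index created on attempt %d" % calls["n"])

for attempt in tenacity.Retrying(
    retry=tenacity.retry_if_exception_type(ConnectionError),
    wait=tenacity.wait_fixed(0.1),   # the real code waits 10 s between tries
    stop=tenacity.stop_after_attempt(5),
    reraise=True,                    # surface the last error if all attempts fail
):
    with attempt:
        flaky()
```

With `reraise=True` the caller sees the original exception rather than a tenacity `RetryError`, which matches how the setup script lets a persistent connection failure bubble up.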
""" - - -# Main imports -import cgi +import os import re import sys import traceback import yaml import json -import plugins.session -import plugins.database -import plugins.openapi + +from api.plugins import openapi +from api.plugins.database import KibbleDatabase +from api.plugins.session import KibbleSession + # Compile valid API URLs from the pages library # Allow backwards compatibility by also accepting .lua URLs urls = [] if __name__ != '__main__': - import pages - for page in pages.handlers: - urls.append((r"^(/api/%s)(/.+)?$" % page, pages.handlers[page].run)) + from api.pages import handlers + for page, handler in handlers.items(): + urls.append((r"^(/api/%s)(/.+)?$" % page, handler.run)) # Load Kibble master configuration -config = yaml.load(open("yaml/kibble.yaml")) +config_yaml = os.path.join(os.path.dirname(os.path.realpath(__file__)), "yaml", "kibble.yaml") +with open(config_yaml, "r") as f: + config = yaml.load(f) # Instantiate database connections DB = None # Load Open API specifications -KibbleOpenAPI = plugins.openapi.OpenAPI("yaml/openapi.yaml") +openapi_yaml = os.path.join(os.path.dirname(os.path.realpath(__file__)), "yaml", "openapi.yaml") +KibbleOpenAPI = openapi.OpenAPI(openapi_yaml) + class KibbleHTTPError(Exception): def __init__(self, code, message): @@ -72,7 +75,6 @@ def __init__(self, path, func): def __call__(self, environ, start_response, session): """Run the function, return response OR return stacktrace""" - response = None try: # Read JSON client data if any try: @@ -96,7 +98,7 @@ def __call__(self, environ, start_response, session): # Validate URL against OpenAPI specs try: self.API.validate(environ['REQUEST_METHOD'], self.path, formdata) - except plugins.openapi.OpenAPIException as err: + except openapi.OpenAPIException as err: start_response('400 Invalid request', [ ('Content-Type', 'application/json')]) yield json.dumps({ @@ -161,13 +163,13 @@ def application(environ, start_response): Checks against the pages library, and if submod found, runs it and returns the output. 
""" - DB = plugins.database.KibbleDatabase(config) + db = KibbleDatabase(config) path = environ.get('PATH_INFO', '') for regex, function in urls: m = re.match(regex, path) if m: callback = KibbleAPIWrapper(path, function) - session = plugins.session.KibbleSession(DB, environ, config) + session = KibbleSession(db, environ, config) a = 0 for bucket in callback(environ, start_response, session): if a == 0: @@ -188,6 +190,5 @@ def application(environ, start_response): yield bytes(bucket, encoding = 'utf-8') - if __name__ == '__main__': KibbleOpenAPI.toHTML() diff --git a/api/pages/__init__.py b/api/pages/__init__.py index af0f5823..f3ba204f 100644 --- a/api/pages/__init__.py +++ b/api/pages/__init__.py @@ -26,7 +26,7 @@ import os # Define all the submodules we have -rootpath = os.path.dirname(__file__) +rootpath = os.path.join(os.path.dirname(os.path.realpath(__file__))) print("Reading pages from %s" % rootpath) # Import each submodule into a hash called 'handlers' @@ -42,6 +42,5 @@ def loadPage(path): p = filepath.replace(rootpath, "")[1:].replace('/', '.')[:-3] xp = p.replace('.', '/') print("Loading endpoint pages.%s as %s" % (p, xp)) - handlers[xp] = importlib.import_module("pages.%s" % p) - + handlers[xp] = importlib.import_module("api.pages.%s" % p) loadPage(rootpath) diff --git a/api/plugins/database.py b/api/plugins/database.py index 913f41dc..c36001de 100644 --- a/api/plugins/database.py +++ b/api/plugins/database.py @@ -20,17 +20,13 @@ It stores the elasticsearch handler and config options. """ - -# Main imports -import cgi -import re -#import aaa import elasticsearch + class KibbleESWrapper(object): """ - Class for rewriting old-style queries to the new ones, - where doc_type is an integral part of the DB name + Class for rewriting old-style queries to the new ones, + where doc_type is an integral part of the DB name """ def __init__(self, ES): self.ES = ES @@ -65,6 +61,7 @@ def count(self, index, doc_type = '*', body = None): body = body ) + class KibbleESWrapperSeven(object): """ Class for rewriting old-style queries to the >= 7.x ones, diff --git a/api/plugins/openapi.py b/api/plugins/openapi.py index 044ff882..f2894700 100644 --- a/api/plugins/openapi.py +++ b/api/plugins/openapi.py @@ -27,10 +27,12 @@ import operator import re + class OpenAPIException(Exception): def __init__(self, message): self.message = message + # Python type names to JSON type names py2JSON = { 'int': 'integer', diff --git a/api/plugins/session.py b/api/plugins/session.py index 56fb87c4..6c212f51 100644 --- a/api/plugins/session.py +++ b/api/plugins/session.py @@ -20,17 +20,12 @@ It handles setting/getting cookies and user prefs """ - -# Main imports -import cgi import re -import sys -import traceback import http.cookies import uuid -import elasticsearch import time + class KibbleSession(object): def getView(self, viewID): diff --git a/docker-compose-dev.yaml b/docker-compose-dev.yaml index bb7f1d2a..c53e9905 100644 --- a/docker-compose-dev.yaml +++ b/docker-compose-dev.yaml @@ -16,7 +16,7 @@ services: # Apache Kibble API server kibble: image: *img - command: bash -c "cd api && gunicorn --reload -w 1 -b 0.0.0.0:8001 handler:application" + command: bash -c "gunicorn --reload -w 1 -b 0.0.0.0:8001 api.handler:application" expose: - 8001 ports: From edb9a91165ff3e3eebbee99db6c86ddf87c362f4 Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Sat, 24 Oct 2020 17:34:39 +0200 Subject: [PATCH 08/48] Make Kibble a package (#67) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
This commit refactors the code base to create an installable kibble application. Co-authored-by: Michał Słowikowski --- .gitignore | 1 + CONTRIBUTING.md | 5 + Dockerfile.dev | 12 +- docker-compose-dev.yaml | 10 +- {api => kibble}/__init__.py | 0 kibble/__main__.py | 23 ++++ kibble/api/__init__.py | 16 +++ {api => kibble/api}/handler.py | 15 +-- {api => kibble/api}/pages/__init__.py | 2 +- {api => kibble/api}/pages/account.py | 0 {api => kibble/api}/pages/bio/bio.py | 0 {api => kibble/api}/pages/bio/newtimers.py | 0 {api => kibble/api}/pages/bio/trends.py | 0 {api => kibble/api}/pages/ci/queue.py | 0 {api => kibble/api}/pages/ci/status.py | 0 .../api}/pages/ci/top-buildcount.py | 0 {api => kibble/api}/pages/ci/top-buildtime.py | 0 {api => kibble/api}/pages/code/changes.py | 0 {api => kibble/api}/pages/code/commits.py | 0 {api => kibble/api}/pages/code/committers.py | 0 {api => kibble/api}/pages/code/evolution.py | 0 .../api}/pages/code/pony-timeseries.py | 0 {api => kibble/api}/pages/code/pony.py | 0 {api => kibble/api}/pages/code/punchcard.py | 0 .../api}/pages/code/relationships.py | 0 {api => kibble/api}/pages/code/retention.py | 0 {api => kibble/api}/pages/code/sloc.py | 0 {api => kibble/api}/pages/code/top-commits.py | 0 {api => kibble/api}/pages/code/top-sloc.py | 0 {api => kibble/api}/pages/code/trends.py | 0 {api => kibble/api}/pages/filters.py | 0 {api => kibble/api}/pages/forum/actors.py | 0 {api => kibble/api}/pages/forum/creators.py | 0 {api => kibble/api}/pages/forum/issues.py | 0 {api => kibble/api}/pages/forum/responders.py | 0 {api => kibble/api}/pages/forum/top-count.py | 0 {api => kibble/api}/pages/forum/top.py | 0 {api => kibble/api}/pages/forum/trends.py | 0 {api => kibble/api}/pages/issue/actors.py | 0 {api => kibble/api}/pages/issue/age.py | 0 {api => kibble/api}/pages/issue/closers.py | 0 {api => kibble/api}/pages/issue/issues.py | 0 {api => kibble/api}/pages/issue/openers.py | 0 .../api}/pages/issue/pony-timeseries.py | 0 .../api}/pages/issue/relationships.py | 0 {api => kibble/api}/pages/issue/retention.py | 0 {api => kibble/api}/pages/issue/top-count.py | 0 {api => kibble/api}/pages/issue/top.py | 0 {api => kibble/api}/pages/issue/trends.py | 0 {api => kibble/api}/pages/mail/keyphrases.py | 0 {api => kibble/api}/pages/mail/map.py | 0 .../api}/pages/mail/mood-timeseries.py | 0 {api => kibble/api}/pages/mail/mood.py | 0 .../api}/pages/mail/pony-timeseries.py | 0 .../api}/pages/mail/relationships.py | 0 {api => kibble/api}/pages/mail/retention.py | 0 .../api}/pages/mail/timeseries-single.py | 0 {api => kibble/api}/pages/mail/timeseries.py | 0 {api => kibble/api}/pages/mail/top-authors.py | 0 {api => kibble/api}/pages/mail/top-topics.py | 0 {api => kibble/api}/pages/mail/trends.py | 0 {api => kibble/api}/pages/org/contributors.py | 0 {api => kibble/api}/pages/org/list.py | 0 {api => kibble/api}/pages/org/members.py | 0 {api => kibble/api}/pages/org/sourcetypes.py | 8 +- {api => kibble/api}/pages/org/trends.py | 0 {api => kibble/api}/pages/session.py | 0 {api => kibble/api}/pages/sources.py | 7 +- {api => kibble/api}/pages/verify.py | 0 {api => kibble/api}/pages/views.py | 0 {api => kibble/api}/pages/widgets.py | 7 +- {api => kibble/api}/plugins/database.py | 0 {api => kibble/api}/plugins/openapi.py | 0 {api => kibble/api}/plugins/session.py | 0 {api => kibble/api}/yaml/openapi.yaml | 0 {api => kibble/api}/yaml/openapi/combine.py | 7 +- .../components/schemas/ActionCompleted.yaml | 0 .../openapi/components/schemas/Biography.yaml | 0 
.../components/schemas/CommitterList.yaml | 0 .../openapi/components/schemas/Empty.yaml | 0 .../openapi/components/schemas/Error.yaml | 0 .../openapi/components/schemas/Factor.yaml | 0 .../openapi/components/schemas/NewOrg.yaml | 0 .../components/schemas/OrgMembers.yaml | 0 .../components/schemas/Organisation.yaml | 0 .../openapi/components/schemas/Phrase.yaml | 0 .../components/schemas/PhraseList.yaml | 0 .../yaml/openapi/components/schemas/Sloc.yaml | 0 .../openapi/components/schemas/Source.yaml | 0 .../openapi/components/schemas/SourceID.yaml | 0 .../components/schemas/SourceList.yaml | 0 .../components/schemas/SourceListAdd.yaml | 0 .../components/schemas/SourceType.yaml | 0 .../components/schemas/SourceTypes.yaml | 0 .../components/schemas/Timeseries.yaml | 0 .../components/schemas/TimeseriesObject.yaml | 0 .../openapi/components/schemas/TopList.yaml | 0 .../openapi/components/schemas/Trend.yaml | 0 .../components/schemas/UserAccount.yaml | 0 .../components/schemas/UserAccountEdit.yaml | 0 .../components/schemas/UserCredentials.yaml | 0 .../openapi/components/schemas/UserData.yaml | 0 .../openapi/components/schemas/UserName.yaml | 0 .../yaml/openapi/components/schemas/View.yaml | 0 .../openapi/components/schemas/ViewList.yaml | 0 .../openapi/components/schemas/WidgetApp.yaml | 0 .../components/schemas/WidgetDesign.yaml | 0 .../openapi/components/schemas/WidgetRow.yaml | 0 .../components/schemas/defaultWidgetArgs.yaml | 0 .../openapi/components/schemas/editView.yaml | 0 .../securitySchemes/cookieAuth.yaml | 0 {api => kibble/api}/yaml/sourcetypes.yaml | 0 {api => kibble/api}/yaml/widgets.yaml | 0 kibble/settings.py | 25 ++++ {setup => kibble/setup}/kibble.yaml.sample | 0 {setup => kibble/setup}/makeaccount.py | 6 +- {setup => kibble/setup}/mappings.json | 0 {setup => kibble/setup}/setup.py | 10 +- kibble/version.py | 18 +++ setup.cfg | 36 ++++++ setup.py | 111 ++++++++++++++++++ setup/requirements.txt | 8 -- 122 files changed, 284 insertions(+), 43 deletions(-) rename {api => kibble}/__init__.py (100%) create mode 100644 kibble/__main__.py create mode 100644 kibble/api/__init__.py rename {api => kibble/api}/handler.py (94%) rename {api => kibble/api}/pages/__init__.py (95%) rename {api => kibble/api}/pages/account.py (100%) rename {api => kibble/api}/pages/bio/bio.py (100%) rename {api => kibble/api}/pages/bio/newtimers.py (100%) rename {api => kibble/api}/pages/bio/trends.py (100%) rename {api => kibble/api}/pages/ci/queue.py (100%) rename {api => kibble/api}/pages/ci/status.py (100%) rename {api => kibble/api}/pages/ci/top-buildcount.py (100%) rename {api => kibble/api}/pages/ci/top-buildtime.py (100%) rename {api => kibble/api}/pages/code/changes.py (100%) rename {api => kibble/api}/pages/code/commits.py (100%) rename {api => kibble/api}/pages/code/committers.py (100%) rename {api => kibble/api}/pages/code/evolution.py (100%) rename {api => kibble/api}/pages/code/pony-timeseries.py (100%) rename {api => kibble/api}/pages/code/pony.py (100%) rename {api => kibble/api}/pages/code/punchcard.py (100%) rename {api => kibble/api}/pages/code/relationships.py (100%) rename {api => kibble/api}/pages/code/retention.py (100%) rename {api => kibble/api}/pages/code/sloc.py (100%) rename {api => kibble/api}/pages/code/top-commits.py (100%) rename {api => kibble/api}/pages/code/top-sloc.py (100%) rename {api => kibble/api}/pages/code/trends.py (100%) rename {api => kibble/api}/pages/filters.py (100%) rename {api => kibble/api}/pages/forum/actors.py (100%) rename {api => 
kibble/api}/pages/forum/creators.py (100%) rename {api => kibble/api}/pages/forum/issues.py (100%) rename {api => kibble/api}/pages/forum/responders.py (100%) rename {api => kibble/api}/pages/forum/top-count.py (100%) rename {api => kibble/api}/pages/forum/top.py (100%) rename {api => kibble/api}/pages/forum/trends.py (100%) rename {api => kibble/api}/pages/issue/actors.py (100%) rename {api => kibble/api}/pages/issue/age.py (100%) rename {api => kibble/api}/pages/issue/closers.py (100%) rename {api => kibble/api}/pages/issue/issues.py (100%) rename {api => kibble/api}/pages/issue/openers.py (100%) rename {api => kibble/api}/pages/issue/pony-timeseries.py (100%) rename {api => kibble/api}/pages/issue/relationships.py (100%) rename {api => kibble/api}/pages/issue/retention.py (100%) rename {api => kibble/api}/pages/issue/top-count.py (100%) rename {api => kibble/api}/pages/issue/top.py (100%) rename {api => kibble/api}/pages/issue/trends.py (100%) rename {api => kibble/api}/pages/mail/keyphrases.py (100%) rename {api => kibble/api}/pages/mail/map.py (100%) rename {api => kibble/api}/pages/mail/mood-timeseries.py (100%) rename {api => kibble/api}/pages/mail/mood.py (100%) rename {api => kibble/api}/pages/mail/pony-timeseries.py (100%) rename {api => kibble/api}/pages/mail/relationships.py (100%) rename {api => kibble/api}/pages/mail/retention.py (100%) rename {api => kibble/api}/pages/mail/timeseries-single.py (100%) rename {api => kibble/api}/pages/mail/timeseries.py (100%) rename {api => kibble/api}/pages/mail/top-authors.py (100%) rename {api => kibble/api}/pages/mail/top-topics.py (100%) rename {api => kibble/api}/pages/mail/trends.py (100%) rename {api => kibble/api}/pages/org/contributors.py (100%) rename {api => kibble/api}/pages/org/list.py (100%) rename {api => kibble/api}/pages/org/members.py (100%) rename {api => kibble/api}/pages/org/sourcetypes.py (93%) rename {api => kibble/api}/pages/org/trends.py (100%) rename {api => kibble/api}/pages/session.py (100%) rename {api => kibble/api}/pages/sources.py (98%) rename {api => kibble/api}/pages/verify.py (100%) rename {api => kibble/api}/pages/views.py (100%) rename {api => kibble/api}/pages/widgets.py (93%) rename {api => kibble/api}/plugins/database.py (100%) rename {api => kibble/api}/plugins/openapi.py (100%) rename {api => kibble/api}/plugins/session.py (100%) rename {api => kibble/api}/yaml/openapi.yaml (100%) rename {api => kibble/api}/yaml/openapi/combine.py (96%) rename {api => kibble/api}/yaml/openapi/components/schemas/ActionCompleted.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/Biography.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/CommitterList.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/Empty.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/Error.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/Factor.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/NewOrg.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/OrgMembers.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/Organisation.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/Phrase.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/PhraseList.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/Sloc.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/Source.yaml (100%) rename {api => 
kibble/api}/yaml/openapi/components/schemas/SourceID.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/SourceList.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/SourceListAdd.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/SourceType.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/SourceTypes.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/Timeseries.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/TimeseriesObject.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/TopList.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/Trend.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/UserAccount.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/UserAccountEdit.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/UserCredentials.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/UserData.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/UserName.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/View.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/ViewList.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/WidgetApp.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/WidgetDesign.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/WidgetRow.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/defaultWidgetArgs.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/schemas/editView.yaml (100%) rename {api => kibble/api}/yaml/openapi/components/securitySchemes/cookieAuth.yaml (100%) rename {api => kibble/api}/yaml/sourcetypes.yaml (100%) rename {api => kibble/api}/yaml/widgets.yaml (100%) create mode 100644 kibble/settings.py rename {setup => kibble/setup}/kibble.yaml.sample (100%) rename {setup => kibble/setup}/makeaccount.py (96%) rename {setup => kibble/setup}/mappings.json (100%) rename {setup => kibble/setup}/setup.py (98%) create mode 100644 kibble/version.py create mode 100644 setup.cfg create mode 100644 setup.py delete mode 100644 setup/requirements.txt diff --git a/.gitignore b/.gitignore index d562a27b..f7ef7928 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ # Apache Kibble files api/yaml/kibble.yaml +kibble/api/yaml/kibble.yaml # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b3ef0dd1..d420751e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,6 +15,11 @@ We also have: ## Development installation +You should be able to install Apache Kibble by simply doing: +``` +pip install -e ."[devel]" +``` + The easiest option to spin up a development environment is to use our development docker-compose. The development image has mounted all Kibble sources so all your local code changes will be automatically reflected in the running app. diff --git a/Dockerfile.dev b/Dockerfile.dev index ab76414d..690be03d 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -15,17 +15,15 @@ # specific language governing permissions and limitations # under the License. -FROM python:3.6 +FROM python:3.8 USER root RUN apt-get update RUN apt-get install -y gcc unzip -COPY ./api /kibble/api/ -COPY ./setup /kibble/setup/ -COPY ./ui /kibble/ui/ - -RUN pip install --upgrade pip -RUN pip install -r /kibble/setup/requirements.txt +COPY . 
/kibble/ WORKDIR /kibble + +RUN pip install --upgrade pip +RUN pip install -e . diff --git a/docker-compose-dev.yaml b/docker-compose-dev.yaml index c53e9905..be6f1363 100644 --- a/docker-compose-dev.yaml +++ b/docker-compose-dev.yaml @@ -7,24 +7,22 @@ services: build: context: . dockerfile: Dockerfile.dev - command: bash -c "python setup/setup.py -e elasticsearch -a -k" + command: bash -c "python kibble/setup/setup.py -e elasticsearch -a -k" volumes: - - ./setup/:/kibble/setup/ + - .:/kibble/ depends_on: - elasticsearch # Apache Kibble API server kibble: image: *img - command: bash -c "gunicorn --reload -w 1 -b 0.0.0.0:8001 api.handler:application" + command: bash -c "gunicorn --reload -w 1 -b 0.0.0.0:8001 kibble.api.handler:application" expose: - 8001 ports: - 8001:8001 volumes: - - ./api/:/kibble/api/ - - ./setup/:/kibble/setup/ - - ./ui/:/kibble/ui/ + - .:/kibble/ depends_on: - elasticsearch diff --git a/api/__init__.py b/kibble/__init__.py similarity index 100% rename from api/__init__.py rename to kibble/__init__.py diff --git a/kibble/__main__.py b/kibble/__main__.py new file mode 100644 index 00000000..c7a18f74 --- /dev/null +++ b/kibble/__main__.py @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +def main(): + print("Hello to kibble!") + + +if __name__ == '__main__': + main() diff --git a/kibble/api/__init__.py b/kibble/api/__init__.py new file mode 100644 index 00000000..13a83393 --- /dev/null +++ b/kibble/api/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
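The handler diff below imports `KIBBLE_YAML` and `YAML_DIRECTORY` from the new `kibble.settings` module, whose body is not shown in this part of the patch. From how the constants are used (the YAML directory used to be resolved relative to handler.py, and .gitignore now lists kibble/api/yaml/kibble.yaml), it plausibly looks something like the following hypothetical reconstruction; treat every path here as an assumption, not the actual file.

```python
# kibble/settings.py -- hypothetical reconstruction, not the committed file.
import os

# The YAML directory now lives inside the api package within the kibble package.
YAML_DIRECTORY = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), "api", "yaml"
)
KIBBLE_YAML = os.path.join(YAML_DIRECTORY, "kibble.yaml")
```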
diff --git a/api/handler.py b/kibble/api/handler.py similarity index 94% rename from api/handler.py rename to kibble/api/handler.py index 466d4b5e..508a7c0c 100644 --- a/api/handler.py +++ b/kibble/api/handler.py @@ -30,30 +30,31 @@ import yaml import json -from api.plugins import openapi -from api.plugins.database import KibbleDatabase -from api.plugins.session import KibbleSession +from kibble.api.plugins import openapi +from kibble.api.plugins.database import KibbleDatabase +from kibble.api.plugins.session import KibbleSession # Compile valid API URLs from the pages library # Allow backwards compatibility by also accepting .lua URLs +from kibble.settings import KIBBLE_YAML, YAML_DIRECTORY + urls = [] if __name__ != '__main__': - from api.pages import handlers + from kibble.api.pages import handlers for page, handler in handlers.items(): urls.append((r"^(/api/%s)(/.+)?$" % page, handler.run)) # Load Kibble master configuration -config_yaml = os.path.join(os.path.dirname(os.path.realpath(__file__)), "yaml", "kibble.yaml") -with open(config_yaml, "r") as f: +with open(KIBBLE_YAML, "r") as f: config = yaml.load(f) # Instantiate database connections DB = None # Load Open API specifications -openapi_yaml = os.path.join(os.path.dirname(os.path.realpath(__file__)), "yaml", "openapi.yaml") +openapi_yaml = os.path.join(YAML_DIRECTORY, "openapi.yaml") KibbleOpenAPI = openapi.OpenAPI(openapi_yaml) diff --git a/api/pages/__init__.py b/kibble/api/pages/__init__.py similarity index 95% rename from api/pages/__init__.py rename to kibble/api/pages/__init__.py index f3ba204f..a9b1b9db 100644 --- a/api/pages/__init__.py +++ b/kibble/api/pages/__init__.py @@ -42,5 +42,5 @@ def loadPage(path): p = filepath.replace(rootpath, "")[1:].replace('/', '.')[:-3] xp = p.replace('.', '/') print("Loading endpoint pages.%s as %s" % (p, xp)) - handlers[xp] = importlib.import_module("api.pages.%s" % p) + handlers[xp] = importlib.import_module(f"kibble.api.pages.{p}") loadPage(rootpath) diff --git a/api/pages/account.py b/kibble/api/pages/account.py similarity index 100% rename from api/pages/account.py rename to kibble/api/pages/account.py diff --git a/api/pages/bio/bio.py b/kibble/api/pages/bio/bio.py similarity index 100% rename from api/pages/bio/bio.py rename to kibble/api/pages/bio/bio.py diff --git a/api/pages/bio/newtimers.py b/kibble/api/pages/bio/newtimers.py similarity index 100% rename from api/pages/bio/newtimers.py rename to kibble/api/pages/bio/newtimers.py diff --git a/api/pages/bio/trends.py b/kibble/api/pages/bio/trends.py similarity index 100% rename from api/pages/bio/trends.py rename to kibble/api/pages/bio/trends.py diff --git a/api/pages/ci/queue.py b/kibble/api/pages/ci/queue.py similarity index 100% rename from api/pages/ci/queue.py rename to kibble/api/pages/ci/queue.py diff --git a/api/pages/ci/status.py b/kibble/api/pages/ci/status.py similarity index 100% rename from api/pages/ci/status.py rename to kibble/api/pages/ci/status.py diff --git a/api/pages/ci/top-buildcount.py b/kibble/api/pages/ci/top-buildcount.py similarity index 100% rename from api/pages/ci/top-buildcount.py rename to kibble/api/pages/ci/top-buildcount.py diff --git a/api/pages/ci/top-buildtime.py b/kibble/api/pages/ci/top-buildtime.py similarity index 100% rename from api/pages/ci/top-buildtime.py rename to kibble/api/pages/ci/top-buildtime.py diff --git a/api/pages/code/changes.py b/kibble/api/pages/code/changes.py similarity index 100% rename from api/pages/code/changes.py rename to kibble/api/pages/code/changes.py diff 
--git a/api/pages/code/commits.py b/kibble/api/pages/code/commits.py similarity index 100% rename from api/pages/code/commits.py rename to kibble/api/pages/code/commits.py diff --git a/api/pages/code/committers.py b/kibble/api/pages/code/committers.py similarity index 100% rename from api/pages/code/committers.py rename to kibble/api/pages/code/committers.py diff --git a/api/pages/code/evolution.py b/kibble/api/pages/code/evolution.py similarity index 100% rename from api/pages/code/evolution.py rename to kibble/api/pages/code/evolution.py diff --git a/api/pages/code/pony-timeseries.py b/kibble/api/pages/code/pony-timeseries.py similarity index 100% rename from api/pages/code/pony-timeseries.py rename to kibble/api/pages/code/pony-timeseries.py diff --git a/api/pages/code/pony.py b/kibble/api/pages/code/pony.py similarity index 100% rename from api/pages/code/pony.py rename to kibble/api/pages/code/pony.py diff --git a/api/pages/code/punchcard.py b/kibble/api/pages/code/punchcard.py similarity index 100% rename from api/pages/code/punchcard.py rename to kibble/api/pages/code/punchcard.py diff --git a/api/pages/code/relationships.py b/kibble/api/pages/code/relationships.py similarity index 100% rename from api/pages/code/relationships.py rename to kibble/api/pages/code/relationships.py diff --git a/api/pages/code/retention.py b/kibble/api/pages/code/retention.py similarity index 100% rename from api/pages/code/retention.py rename to kibble/api/pages/code/retention.py diff --git a/api/pages/code/sloc.py b/kibble/api/pages/code/sloc.py similarity index 100% rename from api/pages/code/sloc.py rename to kibble/api/pages/code/sloc.py diff --git a/api/pages/code/top-commits.py b/kibble/api/pages/code/top-commits.py similarity index 100% rename from api/pages/code/top-commits.py rename to kibble/api/pages/code/top-commits.py diff --git a/api/pages/code/top-sloc.py b/kibble/api/pages/code/top-sloc.py similarity index 100% rename from api/pages/code/top-sloc.py rename to kibble/api/pages/code/top-sloc.py diff --git a/api/pages/code/trends.py b/kibble/api/pages/code/trends.py similarity index 100% rename from api/pages/code/trends.py rename to kibble/api/pages/code/trends.py diff --git a/api/pages/filters.py b/kibble/api/pages/filters.py similarity index 100% rename from api/pages/filters.py rename to kibble/api/pages/filters.py diff --git a/api/pages/forum/actors.py b/kibble/api/pages/forum/actors.py similarity index 100% rename from api/pages/forum/actors.py rename to kibble/api/pages/forum/actors.py diff --git a/api/pages/forum/creators.py b/kibble/api/pages/forum/creators.py similarity index 100% rename from api/pages/forum/creators.py rename to kibble/api/pages/forum/creators.py diff --git a/api/pages/forum/issues.py b/kibble/api/pages/forum/issues.py similarity index 100% rename from api/pages/forum/issues.py rename to kibble/api/pages/forum/issues.py diff --git a/api/pages/forum/responders.py b/kibble/api/pages/forum/responders.py similarity index 100% rename from api/pages/forum/responders.py rename to kibble/api/pages/forum/responders.py diff --git a/api/pages/forum/top-count.py b/kibble/api/pages/forum/top-count.py similarity index 100% rename from api/pages/forum/top-count.py rename to kibble/api/pages/forum/top-count.py diff --git a/api/pages/forum/top.py b/kibble/api/pages/forum/top.py similarity index 100% rename from api/pages/forum/top.py rename to kibble/api/pages/forum/top.py diff --git a/api/pages/forum/trends.py b/kibble/api/pages/forum/trends.py similarity index 100% rename from 
api/pages/forum/trends.py rename to kibble/api/pages/forum/trends.py diff --git a/api/pages/issue/actors.py b/kibble/api/pages/issue/actors.py similarity index 100% rename from api/pages/issue/actors.py rename to kibble/api/pages/issue/actors.py diff --git a/api/pages/issue/age.py b/kibble/api/pages/issue/age.py similarity index 100% rename from api/pages/issue/age.py rename to kibble/api/pages/issue/age.py diff --git a/api/pages/issue/closers.py b/kibble/api/pages/issue/closers.py similarity index 100% rename from api/pages/issue/closers.py rename to kibble/api/pages/issue/closers.py diff --git a/api/pages/issue/issues.py b/kibble/api/pages/issue/issues.py similarity index 100% rename from api/pages/issue/issues.py rename to kibble/api/pages/issue/issues.py diff --git a/api/pages/issue/openers.py b/kibble/api/pages/issue/openers.py similarity index 100% rename from api/pages/issue/openers.py rename to kibble/api/pages/issue/openers.py diff --git a/api/pages/issue/pony-timeseries.py b/kibble/api/pages/issue/pony-timeseries.py similarity index 100% rename from api/pages/issue/pony-timeseries.py rename to kibble/api/pages/issue/pony-timeseries.py diff --git a/api/pages/issue/relationships.py b/kibble/api/pages/issue/relationships.py similarity index 100% rename from api/pages/issue/relationships.py rename to kibble/api/pages/issue/relationships.py diff --git a/api/pages/issue/retention.py b/kibble/api/pages/issue/retention.py similarity index 100% rename from api/pages/issue/retention.py rename to kibble/api/pages/issue/retention.py diff --git a/api/pages/issue/top-count.py b/kibble/api/pages/issue/top-count.py similarity index 100% rename from api/pages/issue/top-count.py rename to kibble/api/pages/issue/top-count.py diff --git a/api/pages/issue/top.py b/kibble/api/pages/issue/top.py similarity index 100% rename from api/pages/issue/top.py rename to kibble/api/pages/issue/top.py diff --git a/api/pages/issue/trends.py b/kibble/api/pages/issue/trends.py similarity index 100% rename from api/pages/issue/trends.py rename to kibble/api/pages/issue/trends.py diff --git a/api/pages/mail/keyphrases.py b/kibble/api/pages/mail/keyphrases.py similarity index 100% rename from api/pages/mail/keyphrases.py rename to kibble/api/pages/mail/keyphrases.py diff --git a/api/pages/mail/map.py b/kibble/api/pages/mail/map.py similarity index 100% rename from api/pages/mail/map.py rename to kibble/api/pages/mail/map.py diff --git a/api/pages/mail/mood-timeseries.py b/kibble/api/pages/mail/mood-timeseries.py similarity index 100% rename from api/pages/mail/mood-timeseries.py rename to kibble/api/pages/mail/mood-timeseries.py diff --git a/api/pages/mail/mood.py b/kibble/api/pages/mail/mood.py similarity index 100% rename from api/pages/mail/mood.py rename to kibble/api/pages/mail/mood.py diff --git a/api/pages/mail/pony-timeseries.py b/kibble/api/pages/mail/pony-timeseries.py similarity index 100% rename from api/pages/mail/pony-timeseries.py rename to kibble/api/pages/mail/pony-timeseries.py diff --git a/api/pages/mail/relationships.py b/kibble/api/pages/mail/relationships.py similarity index 100% rename from api/pages/mail/relationships.py rename to kibble/api/pages/mail/relationships.py diff --git a/api/pages/mail/retention.py b/kibble/api/pages/mail/retention.py similarity index 100% rename from api/pages/mail/retention.py rename to kibble/api/pages/mail/retention.py diff --git a/api/pages/mail/timeseries-single.py b/kibble/api/pages/mail/timeseries-single.py similarity index 100% rename from 
api/pages/mail/timeseries-single.py rename to kibble/api/pages/mail/timeseries-single.py diff --git a/api/pages/mail/timeseries.py b/kibble/api/pages/mail/timeseries.py similarity index 100% rename from api/pages/mail/timeseries.py rename to kibble/api/pages/mail/timeseries.py diff --git a/api/pages/mail/top-authors.py b/kibble/api/pages/mail/top-authors.py similarity index 100% rename from api/pages/mail/top-authors.py rename to kibble/api/pages/mail/top-authors.py diff --git a/api/pages/mail/top-topics.py b/kibble/api/pages/mail/top-topics.py similarity index 100% rename from api/pages/mail/top-topics.py rename to kibble/api/pages/mail/top-topics.py diff --git a/api/pages/mail/trends.py b/kibble/api/pages/mail/trends.py similarity index 100% rename from api/pages/mail/trends.py rename to kibble/api/pages/mail/trends.py diff --git a/api/pages/org/contributors.py b/kibble/api/pages/org/contributors.py similarity index 100% rename from api/pages/org/contributors.py rename to kibble/api/pages/org/contributors.py diff --git a/api/pages/org/list.py b/kibble/api/pages/org/list.py similarity index 100% rename from api/pages/org/list.py rename to kibble/api/pages/org/list.py diff --git a/api/pages/org/members.py b/kibble/api/pages/org/members.py similarity index 100% rename from api/pages/org/members.py rename to kibble/api/pages/org/members.py diff --git a/api/pages/org/sourcetypes.py b/kibble/api/pages/org/sourcetypes.py similarity index 93% rename from api/pages/org/sourcetypes.py rename to kibble/api/pages/org/sourcetypes.py index 8c41b007..005f3c87 100644 --- a/api/pages/org/sourcetypes.py +++ b/kibble/api/pages/org/sourcetypes.py @@ -68,12 +68,16 @@ """ This is the source types handler for Kibble """ +import os import yaml import json -def run(API, environ, indata, session): +from kibble.settings import YAML_DIRECTORY + - types = yaml.load(open("yaml/sourcetypes.yaml")) +def run(API, environ, indata, session): + with open(os.path.join(YAML_DIRECTORY, "sourcetypes.yaml")) as f: + types = yaml.load(f) yield json.dumps(types) diff --git a/api/pages/org/trends.py b/kibble/api/pages/org/trends.py similarity index 100% rename from api/pages/org/trends.py rename to kibble/api/pages/org/trends.py diff --git a/api/pages/session.py b/kibble/api/pages/session.py similarity index 100% rename from api/pages/session.py rename to kibble/api/pages/session.py diff --git a/api/pages/sources.py b/kibble/api/pages/sources.py similarity index 98% rename from api/pages/sources.py rename to kibble/api/pages/sources.py index 86626847..49f9fec6 100644 --- a/api/pages/sources.py +++ b/kibble/api/pages/sources.py @@ -128,11 +128,15 @@ """ import json +import os import re import time import hashlib import yaml +from kibble.settings import YAML_DIRECTORY + + def canModifySource(session): """ Determine if the user can edit sources in this org """ @@ -224,7 +228,8 @@ def run(API, environ, indata, session): if canModifySource(session): new = 0 old = 0 - stypes = yaml.load(open("yaml/sourcetypes.yaml")) + with open(os.path.join(YAML_DIRECTORY, "sourcetypes.yaml")) as f: + stypes = yaml.load(f) for source in indata.get('sources', []): sourceURL = source['sourceURL'] sourceType = source['type'] diff --git a/api/pages/verify.py b/kibble/api/pages/verify.py similarity index 100% rename from api/pages/verify.py rename to kibble/api/pages/verify.py diff --git a/api/pages/views.py b/kibble/api/pages/views.py similarity index 100% rename from api/pages/views.py rename to kibble/api/pages/views.py diff --git a/api/pages/widgets.py 
b/kibble/api/pages/widgets.py similarity index 93% rename from api/pages/widgets.py rename to kibble/api/pages/widgets.py index db3f5fdf..6b101e21 100644 --- a/api/pages/widgets.py +++ b/kibble/api/pages/widgets.py @@ -46,16 +46,21 @@ """ This is the widget design handler for Kibble """ +import os import yaml import json +from kibble.settings import YAML_DIRECTORY + + def run(API, environ, indata, session): if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - widgets = yaml.load(open("yaml/widgets.yaml")) + with open(os.path.join(YAML_DIRECTORY, "widgets.yaml")) as f: + widgets = yaml.load(f) page = indata['pageid'] if not page or page == '0': diff --git a/api/plugins/database.py b/kibble/api/plugins/database.py similarity index 100% rename from api/plugins/database.py rename to kibble/api/plugins/database.py diff --git a/api/plugins/openapi.py b/kibble/api/plugins/openapi.py similarity index 100% rename from api/plugins/openapi.py rename to kibble/api/plugins/openapi.py diff --git a/api/plugins/session.py b/kibble/api/plugins/session.py similarity index 100% rename from api/plugins/session.py rename to kibble/api/plugins/session.py diff --git a/api/yaml/openapi.yaml b/kibble/api/yaml/openapi.yaml similarity index 100% rename from api/yaml/openapi.yaml rename to kibble/api/yaml/openapi.yaml diff --git a/api/yaml/openapi/combine.py b/kibble/api/yaml/openapi/combine.py similarity index 96% rename from api/yaml/openapi/combine.py rename to kibble/api/yaml/openapi/combine.py index 689a0f2f..962021ca 100644 --- a/api/yaml/openapi/combine.py +++ b/kibble/api/yaml/openapi/combine.py @@ -20,6 +20,8 @@ import sys import re +from kibble.settings import YAML_DIRECTORY + baseyaml = """ # THIS IS PULLED FROM SCRIPTS AND AUTOGENERATED! # Please use openapi/combine.py to regenerate! 
@@ -106,7 +108,8 @@ def construct(): if fname.endswith(".py"): fpath = "%s/%s" % (apidir, fname) print("Scanning %s" % fpath) - contents = open(fpath, "r").read() + with open(fpath, "r") as f: + contents = f.read() m = re.search(r"OPENAPI-URI: (\S+)\n##+\n([\s\S]+?)##+", contents) if m: apath = m.group(1) @@ -128,7 +131,7 @@ def construct(): print("Scanning %s" % fpath) defs = yaml.load(open(fpath)) yml['components'][d][fname.replace(".yaml", "")] = defs - ypath = os.path.abspath("%s/../openapi.yaml" % bpath) + ypath = os.path.join(YAML_DIRECTORY, "openapi.yaml") with open(ypath, "w") as f: f.write(baseyaml) f.write(yaml.dump(yml, default_flow_style=False)) diff --git a/api/yaml/openapi/components/schemas/ActionCompleted.yaml b/kibble/api/yaml/openapi/components/schemas/ActionCompleted.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/ActionCompleted.yaml rename to kibble/api/yaml/openapi/components/schemas/ActionCompleted.yaml diff --git a/api/yaml/openapi/components/schemas/Biography.yaml b/kibble/api/yaml/openapi/components/schemas/Biography.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/Biography.yaml rename to kibble/api/yaml/openapi/components/schemas/Biography.yaml diff --git a/api/yaml/openapi/components/schemas/CommitterList.yaml b/kibble/api/yaml/openapi/components/schemas/CommitterList.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/CommitterList.yaml rename to kibble/api/yaml/openapi/components/schemas/CommitterList.yaml diff --git a/api/yaml/openapi/components/schemas/Empty.yaml b/kibble/api/yaml/openapi/components/schemas/Empty.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/Empty.yaml rename to kibble/api/yaml/openapi/components/schemas/Empty.yaml diff --git a/api/yaml/openapi/components/schemas/Error.yaml b/kibble/api/yaml/openapi/components/schemas/Error.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/Error.yaml rename to kibble/api/yaml/openapi/components/schemas/Error.yaml diff --git a/api/yaml/openapi/components/schemas/Factor.yaml b/kibble/api/yaml/openapi/components/schemas/Factor.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/Factor.yaml rename to kibble/api/yaml/openapi/components/schemas/Factor.yaml diff --git a/api/yaml/openapi/components/schemas/NewOrg.yaml b/kibble/api/yaml/openapi/components/schemas/NewOrg.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/NewOrg.yaml rename to kibble/api/yaml/openapi/components/schemas/NewOrg.yaml diff --git a/api/yaml/openapi/components/schemas/OrgMembers.yaml b/kibble/api/yaml/openapi/components/schemas/OrgMembers.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/OrgMembers.yaml rename to kibble/api/yaml/openapi/components/schemas/OrgMembers.yaml diff --git a/api/yaml/openapi/components/schemas/Organisation.yaml b/kibble/api/yaml/openapi/components/schemas/Organisation.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/Organisation.yaml rename to kibble/api/yaml/openapi/components/schemas/Organisation.yaml diff --git a/api/yaml/openapi/components/schemas/Phrase.yaml b/kibble/api/yaml/openapi/components/schemas/Phrase.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/Phrase.yaml rename to kibble/api/yaml/openapi/components/schemas/Phrase.yaml diff --git a/api/yaml/openapi/components/schemas/PhraseList.yaml b/kibble/api/yaml/openapi/components/schemas/PhraseList.yaml 
similarity index 100% rename from api/yaml/openapi/components/schemas/PhraseList.yaml rename to kibble/api/yaml/openapi/components/schemas/PhraseList.yaml diff --git a/api/yaml/openapi/components/schemas/Sloc.yaml b/kibble/api/yaml/openapi/components/schemas/Sloc.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/Sloc.yaml rename to kibble/api/yaml/openapi/components/schemas/Sloc.yaml diff --git a/api/yaml/openapi/components/schemas/Source.yaml b/kibble/api/yaml/openapi/components/schemas/Source.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/Source.yaml rename to kibble/api/yaml/openapi/components/schemas/Source.yaml diff --git a/api/yaml/openapi/components/schemas/SourceID.yaml b/kibble/api/yaml/openapi/components/schemas/SourceID.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/SourceID.yaml rename to kibble/api/yaml/openapi/components/schemas/SourceID.yaml diff --git a/api/yaml/openapi/components/schemas/SourceList.yaml b/kibble/api/yaml/openapi/components/schemas/SourceList.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/SourceList.yaml rename to kibble/api/yaml/openapi/components/schemas/SourceList.yaml diff --git a/api/yaml/openapi/components/schemas/SourceListAdd.yaml b/kibble/api/yaml/openapi/components/schemas/SourceListAdd.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/SourceListAdd.yaml rename to kibble/api/yaml/openapi/components/schemas/SourceListAdd.yaml diff --git a/api/yaml/openapi/components/schemas/SourceType.yaml b/kibble/api/yaml/openapi/components/schemas/SourceType.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/SourceType.yaml rename to kibble/api/yaml/openapi/components/schemas/SourceType.yaml diff --git a/api/yaml/openapi/components/schemas/SourceTypes.yaml b/kibble/api/yaml/openapi/components/schemas/SourceTypes.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/SourceTypes.yaml rename to kibble/api/yaml/openapi/components/schemas/SourceTypes.yaml diff --git a/api/yaml/openapi/components/schemas/Timeseries.yaml b/kibble/api/yaml/openapi/components/schemas/Timeseries.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/Timeseries.yaml rename to kibble/api/yaml/openapi/components/schemas/Timeseries.yaml diff --git a/api/yaml/openapi/components/schemas/TimeseriesObject.yaml b/kibble/api/yaml/openapi/components/schemas/TimeseriesObject.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/TimeseriesObject.yaml rename to kibble/api/yaml/openapi/components/schemas/TimeseriesObject.yaml diff --git a/api/yaml/openapi/components/schemas/TopList.yaml b/kibble/api/yaml/openapi/components/schemas/TopList.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/TopList.yaml rename to kibble/api/yaml/openapi/components/schemas/TopList.yaml diff --git a/api/yaml/openapi/components/schemas/Trend.yaml b/kibble/api/yaml/openapi/components/schemas/Trend.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/Trend.yaml rename to kibble/api/yaml/openapi/components/schemas/Trend.yaml diff --git a/api/yaml/openapi/components/schemas/UserAccount.yaml b/kibble/api/yaml/openapi/components/schemas/UserAccount.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/UserAccount.yaml rename to kibble/api/yaml/openapi/components/schemas/UserAccount.yaml diff --git 
a/api/yaml/openapi/components/schemas/UserAccountEdit.yaml b/kibble/api/yaml/openapi/components/schemas/UserAccountEdit.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/UserAccountEdit.yaml rename to kibble/api/yaml/openapi/components/schemas/UserAccountEdit.yaml diff --git a/api/yaml/openapi/components/schemas/UserCredentials.yaml b/kibble/api/yaml/openapi/components/schemas/UserCredentials.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/UserCredentials.yaml rename to kibble/api/yaml/openapi/components/schemas/UserCredentials.yaml diff --git a/api/yaml/openapi/components/schemas/UserData.yaml b/kibble/api/yaml/openapi/components/schemas/UserData.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/UserData.yaml rename to kibble/api/yaml/openapi/components/schemas/UserData.yaml diff --git a/api/yaml/openapi/components/schemas/UserName.yaml b/kibble/api/yaml/openapi/components/schemas/UserName.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/UserName.yaml rename to kibble/api/yaml/openapi/components/schemas/UserName.yaml diff --git a/api/yaml/openapi/components/schemas/View.yaml b/kibble/api/yaml/openapi/components/schemas/View.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/View.yaml rename to kibble/api/yaml/openapi/components/schemas/View.yaml diff --git a/api/yaml/openapi/components/schemas/ViewList.yaml b/kibble/api/yaml/openapi/components/schemas/ViewList.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/ViewList.yaml rename to kibble/api/yaml/openapi/components/schemas/ViewList.yaml diff --git a/api/yaml/openapi/components/schemas/WidgetApp.yaml b/kibble/api/yaml/openapi/components/schemas/WidgetApp.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/WidgetApp.yaml rename to kibble/api/yaml/openapi/components/schemas/WidgetApp.yaml diff --git a/api/yaml/openapi/components/schemas/WidgetDesign.yaml b/kibble/api/yaml/openapi/components/schemas/WidgetDesign.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/WidgetDesign.yaml rename to kibble/api/yaml/openapi/components/schemas/WidgetDesign.yaml diff --git a/api/yaml/openapi/components/schemas/WidgetRow.yaml b/kibble/api/yaml/openapi/components/schemas/WidgetRow.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/WidgetRow.yaml rename to kibble/api/yaml/openapi/components/schemas/WidgetRow.yaml diff --git a/api/yaml/openapi/components/schemas/defaultWidgetArgs.yaml b/kibble/api/yaml/openapi/components/schemas/defaultWidgetArgs.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/defaultWidgetArgs.yaml rename to kibble/api/yaml/openapi/components/schemas/defaultWidgetArgs.yaml diff --git a/api/yaml/openapi/components/schemas/editView.yaml b/kibble/api/yaml/openapi/components/schemas/editView.yaml similarity index 100% rename from api/yaml/openapi/components/schemas/editView.yaml rename to kibble/api/yaml/openapi/components/schemas/editView.yaml diff --git a/api/yaml/openapi/components/securitySchemes/cookieAuth.yaml b/kibble/api/yaml/openapi/components/securitySchemes/cookieAuth.yaml similarity index 100% rename from api/yaml/openapi/components/securitySchemes/cookieAuth.yaml rename to kibble/api/yaml/openapi/components/securitySchemes/cookieAuth.yaml diff --git a/api/yaml/sourcetypes.yaml b/kibble/api/yaml/sourcetypes.yaml similarity index 100% rename from api/yaml/sourcetypes.yaml rename to 
kibble/api/yaml/sourcetypes.yaml diff --git a/api/yaml/widgets.yaml b/kibble/api/yaml/widgets.yaml similarity index 100% rename from api/yaml/widgets.yaml rename to kibble/api/yaml/widgets.yaml diff --git a/kibble/settings.py b/kibble/settings.py new file mode 100644 index 00000000..db2c1bc8 --- /dev/null +++ b/kibble/settings.py @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os + +YAML_DIRECTORY = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + "api", + "yaml", +) +KIBBLE_YAML = os.path.join(YAML_DIRECTORY, "kibble.yaml") diff --git a/setup/kibble.yaml.sample b/kibble/setup/kibble.yaml.sample similarity index 100% rename from setup/kibble.yaml.sample rename to kibble/setup/kibble.yaml.sample diff --git a/setup/makeaccount.py b/kibble/setup/makeaccount.py similarity index 96% rename from setup/makeaccount.py rename to kibble/setup/makeaccount.py index 64015bb6..1f169d37 100644 --- a/setup/makeaccount.py +++ b/kibble/setup/makeaccount.py @@ -21,6 +21,9 @@ import yaml import bcrypt +from kibble.settings import YAML_DIRECTORY, KIBBLE_YAML + + class KibbleDatabase(object): def __init__(self, config): self.config = config @@ -49,7 +52,8 @@ def __init__(self, config): args = arg_parser.parse_args() # Load Kibble master configuration -config = yaml.load(open("../api/yaml/kibble.yaml")) +with open(KIBBLE_YAML) as f: + config = yaml.load(f) DB = KibbleDatabase(config) diff --git a/setup/mappings.json b/kibble/setup/mappings.json similarity index 100% rename from setup/mappings.json rename to kibble/setup/mappings.json diff --git a/setup/setup.py b/kibble/setup/setup.py similarity index 98% rename from setup/setup.py rename to kibble/setup/setup.py index 689a2971..c204457b 100644 --- a/setup/setup.py +++ b/kibble/setup/setup.py @@ -29,6 +29,8 @@ import json from elasticsearch import Elasticsearch +from kibble.settings import KIBBLE_YAML + KIBBLE_VERSION = '0.1.0' # ABI/API compat demarcation. KIBBLE_DB_VERSION = 2 # Second database revision @@ -219,13 +221,7 @@ def create_es_index( def get_kibble_yaml() -> str: """Resolve path to kibble config yaml""" - kibble_yaml = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.pardir, - "api", - "yaml", - "kibble.yaml" - ) + kibble_yaml = KIBBLE_YAML if os.path.exists(kibble_yaml): print(f"{kibble_yaml} already exists! Writing to {kibble_yaml}.tmp instead") kibble_yaml = kibble_yaml + ".tmp" diff --git a/kibble/version.py b/kibble/version.py new file mode 100644 index 00000000..a913b5ac --- /dev/null +++ b/kibble/version.py @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +version = "1.0.0dev" diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..b3135bb3 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +[metadata] +name = Kibble +summary = Apache Kibble is a tool to collect, aggregate and visualize data about any software project that uses commonly known tools. +description-file = README.md +author = Apache Kibble +author-email = dev@kibble.apache.org +license = Apache License, Version 2.0 +license_files = + LICENSE + NOTICE + +[bdist_wheel] +python-tag=py3 + + +[files] +packages = kibble + +[easy_install] diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..4208d68e --- /dev/null +++ b/setup.py @@ -0,0 +1,111 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import os +from importlib import util + +from setuptools import find_packages, setup + +# Kept manually in sync with kibble.version +spec = util.spec_from_file_location("kibble.version", os.path.join('kibble', 'version.py')) # noqa +mod = util.module_from_spec(spec) +spec.loader.exec_module(mod) # type: ignore +version = mod.version # type: ignore + +DEVEL_REQUIREMENTS = [ + "pre-commit==2.7.1", +] + +INSTALL_REQUIREMENTS = [ + "bcrypt==3.2.0", + "certifi==2020.6.20", + "elasticsearch==7.9.1", + "gunicorn==20.0.4", + "python-dateutil==2.8.1", + "PyYAML==5.3.1", + "tenacity==6.2.0", +] + +EXTRAS_REQUIREMENTS = { + "devel": DEVEL_REQUIREMENTS +} + + +def get_long_description(): + description = "" + try: + with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'README.md'), encoding='utf-8') as f: + description = f.read() + except FileNotFoundError: + pass + return description + + +def do_setup(): + """Perform the Kibble package setup.""" + setup( + name='apache-kibble', + description="Apache Kibble is a tool to collect, aggregate and visualize data about any software project.", + long_description=get_long_description(), + long_description_content_type='text/markdown', + license='Apache License 2.0', + version=version, + packages=find_packages(include=['kibble*']), + package_data={ + 'kibble': ['py.typed'], + 'kibble.api.yaml': ['*.yaml'], + }, + include_package_data=True, + zip_safe=False, + entry_points={ + "console_scripts": [ + "kibble = kibble.__main__:main", + ], + }, + install_requires=INSTALL_REQUIREMENTS, + setup_requires=[ + 'docutils', + 'gitpython', + 'setuptools', + 'wheel', + ], + extras_require=EXTRAS_REQUIREMENTS, + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Environment :: Console', + 'Environment :: Web Environment', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 3.8', + ], + author='Apache Software Foundation', + author_email='dev@kibble.apache.org', + url='http://kibble.apache.org/', + download_url=f'https://archive.apache.org/dist/kibble/{version}', + test_suite='setup.kibble_test_suite', + python_requires='~=3.8', + project_urls={ + 'Documentation': 'https://kibble.apache.org/docs/', + 'Bug Tracker': 'https://github.com/apache/kibble/issues', + 'Source Code': 'https://github.com/apache/kibble', + }, + ) + + +if __name__ == "__main__": + do_setup() diff --git a/setup/requirements.txt b/setup/requirements.txt deleted file mode 100644 index 6a2d1f3a..00000000 --- a/setup/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -bcrypt==3.2.0 -certifi==2020.6.20 -elasticsearch==7.9.1 -gunicorn==20.0.4 -pre-commit==2.7.1 -python-dateutil==2.8.1 -PyYAML==5.3.1 -tenacity==6.2.0 From 340f104663261203e573fab504bd843e874a938e Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Sat, 24 Oct 2020 18:05:50 +0200 Subject: [PATCH 09/48] Add auto-labeler for PRs (#68) --- .github/labeler.yml | 11 +++++++++++ .github/workflows/labeler.yaml | 11 +++++++++++ 2 files changed, 22 insertions(+) create mode 100644 .github/labeler.yml create mode 100644 .github/workflows/labeler.yaml diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 00000000..fb2173bd --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,11 @@ +area:api: +- any: ['api/'] + +area:ui: +- any: ['ui/'] + +area:docs: +- any: ['docs/', '*.md'] + +area:dev: +- any: ['.github/', '.pre-commit.config.yaml', 'asf.yaml', 'Dockerfile*', 'docker*'] diff 
--git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml new file mode 100644 index 00000000..4415f001 --- /dev/null +++ b/.github/workflows/labeler.yaml @@ -0,0 +1,11 @@ +name: "Pull Request Labeler" +on: + - pull_request_target + +jobs: + triage: + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@main + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" From e4563ca6bc9cb390bf3f1701d5ef8833fd9d0cc2 Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Sat, 24 Oct 2020 18:21:04 +0200 Subject: [PATCH 10/48] Fix labeler definitions (#70) --- .github/labeler.yml | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/labeler.yml b/.github/labeler.yml index fb2173bd..ee69a607 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,11 +1,17 @@ area:api: -- any: ['api/'] + - 'kibble/api/*' area:ui: -- any: ['ui/'] + - 'ui/*' area:docs: -- any: ['docs/', '*.md'] + - 'docs/*' + - '*.md' area:dev: -- any: ['.github/', '.pre-commit.config.yaml', 'asf.yaml', 'Dockerfile*', 'docker*'] + - '.github/' + - '.pre-commit.config.yaml' + - 'asf.yaml' + - 'Dockerfile*' + - 'docker*' + - 'setup.*' From d7f9031dfd93a2efd676fcbd59443feec01df6ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20S=C5=82owikowski?= Date: Sun, 25 Oct 2020 01:43:30 +0200 Subject: [PATCH 11/48] Add Black to pre-commit (#66) Add Black to pre-commit and format whole codebase --- .pre-commit-config.yaml | 6 + docs/source/conf.py | 64 ++-- kibble/__main__.py | 3 +- kibble/api/handler.py | 88 +++--- kibble/api/pages/__init__.py | 8 +- kibble/api/pages/account.py | 114 ++++--- kibble/api/pages/bio/bio.py | 153 +++++----- kibble/api/pages/bio/newtimers.py | 285 +++++++---------- kibble/api/pages/bio/trends.py | 258 +++++----------- kibble/api/pages/ci/queue.py | 164 ++++------ kibble/api/pages/ci/status.py | 133 ++++---- kibble/api/pages/ci/top-buildcount.py | 128 ++++---- kibble/api/pages/ci/top-buildtime.py | 132 ++++---- kibble/api/pages/code/changes.py | 154 ++++------ kibble/api/pages/code/commits.py | 135 ++++---- kibble/api/pages/code/committers.py | 245 ++++++--------- kibble/api/pages/code/evolution.py | 109 +++---- kibble/api/pages/code/pony-timeseries.py | 129 ++++---- kibble/api/pages/code/pony.py | 220 +++++-------- kibble/api/pages/code/punchcard.py | 132 ++++---- kibble/api/pages/code/relationships.py | 217 +++++++------ kibble/api/pages/code/retention.py | 144 ++++----- kibble/api/pages/code/sloc.py | 85 ++---- kibble/api/pages/code/top-commits.py | 113 +++---- kibble/api/pages/code/top-sloc.py | 73 ++--- kibble/api/pages/code/trends.py | 323 +++++++------------- kibble/api/pages/filters.py | 39 +-- kibble/api/pages/forum/actors.py | 196 +++++------- kibble/api/pages/forum/creators.py | 126 +++----- kibble/api/pages/forum/issues.py | 210 ++++++------- kibble/api/pages/forum/responders.py | 127 +++----- kibble/api/pages/forum/top-count.py | 102 +++---- kibble/api/pages/forum/top.py | 102 +++---- kibble/api/pages/forum/trends.py | 306 +++++++------------ kibble/api/pages/issue/actors.py | 204 +++++-------- kibble/api/pages/issue/age.py | 97 +++--- kibble/api/pages/issue/closers.py | 129 +++----- kibble/api/pages/issue/issues.py | 211 ++++++------- kibble/api/pages/issue/openers.py | 128 +++----- kibble/api/pages/issue/pony-timeseries.py | 132 ++++---- kibble/api/pages/issue/relationships.py | 222 ++++++-------- kibble/api/pages/issue/retention.py | 148 ++++----- kibble/api/pages/issue/top-count.py | 106 +++---- kibble/api/pages/issue/top.py | 107 +++---- 
kibble/api/pages/issue/trends.py | 330 +++++++------------- kibble/api/pages/mail/keyphrases.py | 93 ++---- kibble/api/pages/mail/map.py | 238 ++++++++------- kibble/api/pages/mail/mood-timeseries.py | 126 +++----- kibble/api/pages/mail/mood.py | 163 ++++------ kibble/api/pages/mail/pony-timeseries.py | 121 +++----- kibble/api/pages/mail/relationships.py | 192 ++++++------ kibble/api/pages/mail/retention.py | 136 ++++----- kibble/api/pages/mail/timeseries-single.py | 105 +++---- kibble/api/pages/mail/timeseries.py | 155 +++++----- kibble/api/pages/mail/top-authors.py | 115 +++---- kibble/api/pages/mail/top-topics.py | 95 +++--- kibble/api/pages/mail/trends.py | 340 +++++++++------------ kibble/api/pages/org/contributors.py | 124 ++++---- kibble/api/pages/org/list.py | 83 +++-- kibble/api/pages/org/members.py | 194 +++++++----- kibble/api/pages/org/sourcetypes.py | 4 - kibble/api/pages/org/trends.py | 155 ++++------ kibble/api/pages/session.py | 96 +++--- kibble/api/pages/sources.py | 174 ++++++----- kibble/api/pages/verify.py | 30 +- kibble/api/pages/views.py | 236 +++++++------- kibble/api/pages/widgets.py | 10 +- kibble/api/plugins/database.py | 132 ++++---- kibble/api/plugins/openapi.py | 247 +++++++++------ kibble/api/plugins/session.py | 183 ++++++----- kibble/api/yaml/openapi/combine.py | 48 +-- kibble/settings.py | 4 +- kibble/setup/makeaccount.py | 76 +++-- kibble/setup/setup.py | 239 +++++++-------- setup.py | 75 ++--- 75 files changed, 4564 insertions(+), 6062 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0bb7740f..889d6237 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -68,3 +68,9 @@ repos: - --license-filepath - license-templates/LICENSE.txt - --fuzzy-match-generates-todo + - repo: https://github.com/psf/black + rev: 19.3b0 + hooks: + - id: black + name: Black + types: [python] diff --git a/docs/source/conf.py b/docs/source/conf.py index 9f49f275..35f0f221 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -46,34 +46,33 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ['sphinx.ext.todo', - 'sphinx.ext.imgmath'] +extensions = ["sphinx.ext.todo", "sphinx.ext.imgmath"] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'Apache Kibble' -copyright = u'2018, The Apache Kibble Community' -author = u'The Apache Kibble Community' +project = u"Apache Kibble" +copyright = u"2018, The Apache Kibble Community" +author = u"The Apache Kibble Community" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'0.1' +version = u"0.1" # The full version, including alpha/beta/rc tags. -release = u'0.1' +release = u"0.1" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -88,7 +87,7 @@ exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. 
-pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -99,8 +98,8 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' -html_logo = '_static/images/kibble-logo.png' +html_theme = "sphinx_rtd_theme" +html_logo = "_static/images/kibble-logo.png" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -111,7 +110,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # Custom sidebar templates, must be a dictionary that maps document names # to template names. @@ -119,9 +118,9 @@ # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { - '**': [ - 'relations.html', # needs 'show_related': True theme option to display - 'searchbox.html', + "**": [ + "relations.html", # needs 'show_related': True theme option to display + "searchbox.html", ] } @@ -129,7 +128,7 @@ # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'ApacheKibbledoc' +htmlhelp_basename = "ApacheKibbledoc" # -- Options for LaTeX output --------------------------------------------- @@ -138,15 +137,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -156,8 +152,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'ApacheKibble.tex', u'Apache Kibble Documentation', - u'The Apache Kibble Community', 'manual'), + ( + master_doc, + "ApacheKibble.tex", + u"Apache Kibble Documentation", + u"The Apache Kibble Community", + "manual", + ) ] @@ -165,10 +166,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'apachekibble', u'Apache Kibble Documentation', - [author], 1) -] +man_pages = [(master_doc, "apachekibble", u"Apache Kibble Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -177,7 +175,13 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'ApacheKibble', u'Apache Kibble Documentation', - author, 'ApacheKibble', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "ApacheKibble", + u"Apache Kibble Documentation", + author, + "ApacheKibble", + "One line description of project.", + "Miscellaneous", + ) ] diff --git a/kibble/__main__.py b/kibble/__main__.py index c7a18f74..aea89fd1 100644 --- a/kibble/__main__.py +++ b/kibble/__main__.py @@ -15,9 +15,10 @@ # specific language governing permissions and limitations # under the License. 
+ def main(): print("Hello to kibble!") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/kibble/api/handler.py b/kibble/api/handler.py index 508a7c0c..63d24891 100644 --- a/kibble/api/handler.py +++ b/kibble/api/handler.py @@ -1,5 +1,3 @@ - - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -40,8 +38,9 @@ from kibble.settings import KIBBLE_YAML, YAML_DIRECTORY urls = [] -if __name__ != '__main__': +if __name__ != "__main__": from kibble.api.pages import handlers + for page, handler in handlers.items(): urls.append((r"^(/api/%s)(/.+)?$" % page, handler.run)) @@ -68,6 +67,7 @@ class KibbleAPIWrapper: """ Middleware wrapper for exceptions in the application """ + def __init__(self, path, func): self.func = func self.API = KibbleOpenAPI @@ -79,33 +79,29 @@ def __call__(self, environ, start_response, session): try: # Read JSON client data if any try: - request_size = int(environ.get('CONTENT_LENGTH', 0)) + request_size = int(environ.get("CONTENT_LENGTH", 0)) except (ValueError): request_size = 0 - requestBody = environ['wsgi.input'].read(request_size) + requestBody = environ["wsgi.input"].read(request_size) formdata = {} if requestBody and len(requestBody) > 0: try: - formdata = json.loads(requestBody.decode('utf-8')) + formdata = json.loads(requestBody.decode("utf-8")) except json.JSONDecodeError as err: - start_response('400 Invalid request', [ - ('Content-Type', 'application/json')]) - yield json.dumps({ - "code": 400, - "reason": "Invalid JSON: %s" % err - }) + start_response( + "400 Invalid request", [("Content-Type", "application/json")] + ) + yield json.dumps({"code": 400, "reason": "Invalid JSON: %s" % err}) return # Validate URL against OpenAPI specs try: - self.API.validate(environ['REQUEST_METHOD'], self.path, formdata) + self.API.validate(environ["REQUEST_METHOD"], self.path, formdata) except openapi.OpenAPIException as err: - start_response('400 Invalid request', [ - ('Content-Type', 'application/json')]) - yield json.dumps({ - "code": 400, - "reason": err.message - }) + start_response( + "400 Invalid request", [("Content-Type", "application/json")] + ) + yield json.dumps({"code": 400, "reason": err.message}) return # Call page with env, SR and form data @@ -116,45 +112,41 @@ def __call__(self, environ, start_response, session): yield bucket except KibbleHTTPError as err: errHeaders = { - 403: '403 Authentication failed', - 404: '404 Resource not found', - 500: '500 Internal Server Error', - 501: '501 Gateway error' + 403: "403 Authentication failed", + 404: "404 Resource not found", + 500: "500 Internal Server Error", + 501: "501 Gateway error", } - errHeader = errHeaders[err.code] if err.code in errHeaders else "400 Bad request" - start_response(errHeader, [ - ('Content-Type', 'application/json')]) - yield json.dumps({ - "code": err.code, - "reason": err.message - }, indent = 4) + "\n" + errHeader = ( + errHeaders[err.code] + if err.code in errHeaders + else "400 Bad request" + ) + start_response(errHeader, [("Content-Type", "application/json")]) + yield json.dumps( + {"code": err.code, "reason": err.message}, indent=4 + ) + "\n" return except: err_type, err_value, tb = sys.exc_info() - traceback_output = ['API traceback:'] + traceback_output = ["API traceback:"] traceback_output += traceback.format_tb(tb) - traceback_output.append('%s: %s' % (err_type.__name__, err_value)) + traceback_output.append("%s: %s" % 
(err_type.__name__, err_value)) # We don't know if response has been given yet, try giving one, fail gracefully. try: - start_response('500 Internal Server Error', [ - ('Content-Type', 'application/json')]) + start_response( + "500 Internal Server Error", [("Content-Type", "application/json")] + ) except: pass - yield json.dumps({ - "code": "500", - "reason": '\n'.join(traceback_output) - }) + yield json.dumps({"code": "500", "reason": "\n".join(traceback_output)}) def fourohfour(environ, start_response): """A very simple 404 handler""" - start_response("404 Not Found", [ - ('Content-Type', 'application/json')]) - yield json.dumps({ - "code": 404, - "reason": "API endpoint not found" - }, indent = 4) + "\n" + start_response("404 Not Found", [("Content-Type", "application/json")]) + yield json.dumps({"code": 404, "reason": "API endpoint not found"}, indent=4) + "\n" return @@ -165,7 +157,7 @@ def application(environ, start_response): it and returns the output. """ db = KibbleDatabase(config) - path = environ.get('PATH_INFO', '') + path = environ.get("PATH_INFO", "") for regex, function in urls: m = re.match(regex, path) if m: @@ -182,14 +174,14 @@ def application(environ, start_response): a += 1 # WSGI prefers byte strings, so convert if regular py3 string if isinstance(bucket, str): - yield bytes(bucket, encoding = 'utf-8') + yield bytes(bucket, encoding="utf-8") elif isinstance(bucket, bytes): yield bucket return for bucket in fourohfour(environ, start_response): - yield bytes(bucket, encoding = 'utf-8') + yield bytes(bucket, encoding="utf-8") -if __name__ == '__main__': +if __name__ == "__main__": KibbleOpenAPI.toHTML() diff --git a/kibble/api/pages/__init__.py b/kibble/api/pages/__init__.py index a9b1b9db..76f12359 100644 --- a/kibble/api/pages/__init__.py +++ b/kibble/api/pages/__init__.py @@ -24,6 +24,7 @@ import importlib import os + # Define all the submodules we have rootpath = os.path.join(os.path.dirname(os.path.realpath(__file__))) @@ -32,6 +33,7 @@ # Import each submodule into a hash called 'handlers' handlers = {} + def loadPage(path): for el in os.listdir(path): filepath = os.path.join(path, el) @@ -39,8 +41,10 @@ def loadPage(path): if os.path.isdir(filepath): loadPage(filepath) else: - p = filepath.replace(rootpath, "")[1:].replace('/', '.')[:-3] - xp = p.replace('.', '/') + p = filepath.replace(rootpath, "")[1:].replace("/", ".")[:-3] + xp = p.replace(".", "/") print("Loading endpoint pages.%s as %s" % (p, xp)) handlers[xp] = importlib.import_module(f"kibble.api.pages.{p}") + + loadPage(rootpath) diff --git a/kibble/api/pages/account.py b/kibble/api/pages/account.py index 8821eee0..196d06a7 100644 --- a/kibble/api/pages/account.py +++ b/kibble/api/pages/account.py @@ -106,44 +106,55 @@ def sendCode(session, addr, code): msg = email.message.EmailMessage() - msg['To'] = addr - msg['From'] = session.config['mail']['sender'] - msg['Subject'] = "Please verify your account" - msg.set_content("""\ + msg["To"] = addr + msg["From"] = session.config["mail"]["sender"] + msg["Subject"] = "Please verify your account" + msg.set_content( + """\ Hi there! Please verify your account by visiting: %s/api/verify/%s/%s With regards, Apache Kibble. 
-""" % (session.url, addr, code) +""" + % (session.url, addr, code) + ) + s = smtplib.SMTP( + "%s:%s" + % (session.config["mail"]["mailhost"], session.config["mail"]["mailport"]) ) - s = smtplib.SMTP("%s:%s" % (session.config['mail']['mailhost'], session.config['mail']['mailport'])) s.send_message(msg) s.quit() + def run(API, environ, indata, session): - method = environ['REQUEST_METHOD'] + method = environ["REQUEST_METHOD"] # Add a new account?? if method == "PUT": - u = indata['email'] - p = indata['password'] - d = indata['displayname'] + u = indata["email"] + p = indata["password"] + d = indata["displayname"] # Are new accounts allowed? (admin can always make accounts, of course) - if not session.config['accounts'].get('allowSignup', False): - if not (session.user and session.user['level'] == 'admin'): - raise API.exception(403, "New account requests have been administratively disabled.") + if not session.config["accounts"].get("allowSignup", False): + if not (session.user and session.user["level"] == "admin"): + raise API.exception( + 403, "New account requests have been administratively disabled." + ) # Check if we already have that username in use - if session.DB.ES.exists(index=session.DB.dbname, doc_type='useraccount', id = u): + if session.DB.ES.exists(index=session.DB.dbname, doc_type="useraccount", id=u): raise API.exception(403, "Username already in use") # We require a username, displayName password of at least 3 chars each if len(p) < 3 or len(u) < 3 or len(d) < 3: - raise API.exception(400, "Username, display-name and password must each be at elast 3 characters long.") + raise API.exception( + 400, + "Username, display-name and password must each be at elast 3 characters long.", + ) # We loosely check that the email is an email if not re.match(r"^\S+@\S+\.\S+$", u): @@ -151,7 +162,7 @@ def run(API, environ, indata, session): # Okay, let's make an account...I guess salt = bcrypt.gensalt() - pwd = bcrypt.hashpw(p.encode('utf-8'), salt).decode('ascii') + pwd = bcrypt.hashpw(p.encode("utf-8"), salt).decode("ascii") # Verification code, if needed vsalt = bcrypt.gensalt() @@ -161,32 +172,33 @@ def run(API, environ, indata, session): # This is so previously unverified accounts don'thave to verify # if we later turn verification on. verified = True - if session.config['accounts'].get('verify'): + if session.config["accounts"].get("verify"): verified = False - sendCode(session, u, vcode) # Send verification email + sendCode(session, u, vcode) # Send verification email # If verification email fails, skip account creation. doc = { - 'email': u, # Username (email) - 'password': pwd, # Hashed password - 'displayName': d, # Display Name - 'organisations': [], # Orgs user belongs to (default is none) - 'ownerships': [], # Orgs user owns (default is none) - 'defaultOrganisation': None, # Default org for user - 'verified': verified, # Account verified via email? - 'vcode': vcode, # Verification code - 'userlevel': "user" # User level (user/admin) + "email": u, # Username (email) + "password": pwd, # Hashed password + "displayName": d, # Display Name + "organisations": [], # Orgs user belongs to (default is none) + "ownerships": [], # Orgs user owns (default is none) + "defaultOrganisation": None, # Default org for user + "verified": verified, # Account verified via email? 
+            "vcode": vcode,  # Verification code
+            "userlevel": "user",  # User level (user/admin)
         }
 
-        # If we have auto-invite on, check if there are orgs to invite to
-        if 'autoInvite' in session.config['accounts']:
-            dom = u.split('@')[-1].lower()
-            for ai in session.config['accounts']['autoInvite']:
-                if ai['domain'] == dom:
-                    doc['organisations'].append(ai['organisation'])
-
-        session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = u, body = doc)
+        # If we have auto-invite on, check if there are orgs to invite to
+        if "autoInvite" in session.config["accounts"]:
+            dom = u.split("@")[-1].lower()
+            for ai in session.config["accounts"]["autoInvite"]:
+                if ai["domain"] == dom:
+                    doc["organisations"].append(ai["organisation"])
+
+        session.DB.ES.index(
+            index=session.DB.dbname, doc_type="useraccount", id=u, body=doc
+        )
 
         yield json.dumps({"message": "Account created!", "verified": verified})
         return
 
@@ -194,24 +206,30 @@ def run(API, environ, indata, session):
     if not session.user:
         raise API.exception(403, "You must be logged in to use this API endpoint! %s")
 
     # Patch (edit) an account
     if method == "PATCH":
-        userid = session.user['email']
-        if indata.get('email') and session.user['userlevel'] == "admin":
-            userid = indata.get('email')
-        doc = session.DB.ES.get(index=session.DB.dbname, doc_type='useraccount', id = userid)
-        udoc = doc['_source']
-        if indata.get('defaultOrganisation'):
+        userid = session.user["email"]
+        if indata.get("email") and session.user["userlevel"] == "admin":
+            userid = indata.get("email")
+        doc = session.DB.ES.get(
+            index=session.DB.dbname, doc_type="useraccount", id=userid
+        )
+        udoc = doc["_source"]
+        if indata.get("defaultOrganisation"):
             # Make sure user is a member or admin here..
-            if session.user['userlevel'] == "admin" or indata.get('defaultOrganisation') in udoc['organisations']:
-                udoc['defaultOrganisation'] = indata.get('defaultOrganisation')
+            if (
+                session.user["userlevel"] == "admin"
+                or indata.get("defaultOrganisation") in udoc["organisations"]
+            ):
+                udoc["defaultOrganisation"] = indata.get("defaultOrganisation")
         # Changing password?
-        if indata.get('password'):
-            p = indata.get('password')
+        if indata.get("password"):
+            p = indata.get("password")
             salt = bcrypt.gensalt()
-            pwd = bcrypt.hashpw(p.encode('utf-8'), salt).decode('ascii')
+            pwd = bcrypt.hashpw(p.encode("utf-8"), salt).decode("ascii")
 
         # Update user doc
-        session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = userid, body = udoc)
+        session.DB.ES.index(
+            index=session.DB.dbname, doc_type="useraccount", id=userid, body=udoc
+        )
         yield json.dumps({"message": "Account updated!"})
         return
diff --git a/kibble/api/pages/bio/bio.py b/kibble/api/pages/bio/bio.py
index c2c91cbb..32f62c42 100644
--- a/kibble/api/pages/bio/bio.py
+++ b/kibble/api/pages/bio/bio.py
@@ -61,9 +61,6 @@
 ########################################################################
 
-
-
-
 """
 This is the contributor trends renderer for Kibble
 """
@@ -72,6 +69,7 @@
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
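An aside on the account flow above: only the bcrypt hash is ever stored, so a later login check has to go through bcrypt rather than comparing strings. A minimal sketch of verifying a candidate password against the stored "password" field; the password_matches helper and the stored_doc name are hypothetical stand-ins for the fetched useraccount document:

import bcrypt

def password_matches(candidate, stored_doc):
    # bcrypt embeds the salt in the hash, so checkpw can re-derive it
    return bcrypt.checkpw(
        candidate.encode("utf-8"), stored_doc["password"].encode("ascii")
    )
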
@@ -82,116 +80,101 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + dOrg = session.user["defaultOrganisation"] or "apache" - dOrg = session.user['defaultOrganisation'] or "apache" - - pid = hashlib.sha1( ("%s%s" % (dOrg, indata.get('email', '???'))).encode('ascii', errors='replace')).hexdigest() + pid = hashlib.sha1( + ("%s%s" % (dOrg, indata.get("email", "???"))).encode("ascii", errors="replace") + ).hexdigest() person = {} - if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id = pid): - person = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id = pid)['_source'] + if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=pid): + person = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id=pid)[ + "_source" + ] else: raise API.exception(404, "No such biography!") query = { - 'query': { - 'bool': { - 'must': [ - { - 'term': { - 'organisation': dOrg - } - } - ] - } - }, - 'size': 1, - 'sort': [{ 'ts': 'asc' }] - } + "query": {"bool": {"must": [{"term": {"organisation": dOrg}}]}}, + "size": 1, + "sort": [{"ts": "asc"}], + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - codeKey = 'committer_email' - query['query']['bool']['should'] = [ - {'term': {'issueCreator': indata.get('email')}}, - {'term': {'issueCloser': indata.get('email')}}, - {'term': {'sender': indata.get('email')}}, - {'term': {codeKey: indata.get('email')}}, + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + codeKey = "committer_email" + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + {"term": {"sender": indata.get("email")}}, + {"term": {codeKey: indata.get("email")}}, ] - query['query']['bool']['minimum_should_match'] = 1 - + query["query"]["bool"]["minimum_should_match"] = 1 # FIRST EMAIL - res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - body = query - ) + res = session.DB.ES.search(index=session.DB.dbname, doc_type="email", body=query) firstEmail = None - if res['hits']['hits']: - firstEmail = res['hits']['hits'][0]['_source']['ts'] + if res["hits"]["hits"]: + firstEmail = res["hits"]["hits"][0]["_source"]["ts"] # FIRST COMMIT res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) + index=session.DB.dbname, doc_type="code_commit", body=query + ) firstCommit = None - if res['hits']['hits']: - firstCommit = res['hits']['hits'][0]['_source']['ts'] + if res["hits"]["hits"]: + firstCommit = res["hits"]["hits"][0]["_source"]["ts"] # FIRST AUTHORSHIP - query['query']['bool']['should'][3] = {'term': {'author_email': indata.get('email')}} + query["query"]["bool"]["should"][3] = { + "term": {"author_email": 
indata.get("email")} + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) + index=session.DB.dbname, doc_type="code_commit", body=query + ) firstAuthor = None - if res['hits']['hits']: - firstAuthor = res['hits']['hits'][0]['_source']['ts'] - + if res["hits"]["hits"]: + firstAuthor = res["hits"]["hits"][0]["_source"]["ts"] # COUNT EMAIL, CODE, LINES CHANGED - del query['sort'] - del query['size'] + del query["sort"] + del query["size"] no_emails = session.DB.ES.count( - index=session.DB.dbname, - doc_type="email", - body = query - )['count'] + index=session.DB.dbname, doc_type="email", body=query + )["count"] no_commits = session.DB.ES.count( - index=session.DB.dbname, - doc_type="code_commit", - body = query - )['count'] + index=session.DB.dbname, doc_type="code_commit", body=query + )["count"] JSON_OUT = { - 'found': True, - 'bio': { - 'organisation': dOrg, - 'name': person['name'], - 'email': person['email'], - 'id': pid, - 'gravatar': hashlib.md5(person['email'].lower().encode('utf-8')).hexdigest(), - 'firstEmail': firstEmail, - 'firstCommit': firstCommit, - 'firstAuthor': firstAuthor, - 'tags': person.get('tags', []), - 'alts': person.get('alts', []), - 'emails': no_emails, - 'commits': no_commits + "found": True, + "bio": { + "organisation": dOrg, + "name": person["name"], + "email": person["email"], + "id": pid, + "gravatar": hashlib.md5( + person["email"].lower().encode("utf-8") + ).hexdigest(), + "firstEmail": firstEmail, + "firstCommit": firstCommit, + "firstAuthor": firstAuthor, + "tags": person.get("tags", []), + "alts": person.get("alts", []), + "emails": no_emails, + "commits": no_commits, }, - 'okay': True, - 'responseTime': time.time() - now + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/bio/newtimers.py b/kibble/api/pages/bio/newtimers.py index e85a08b7..57b590a9 100644 --- a/kibble/api/pages/bio/newtimers.py +++ b/kibble/api/pages/bio/newtimers.py @@ -61,9 +61,6 @@ ######################################################################## - - - """ This is the newtimers list renderer for Kibble """ @@ -72,44 +69,25 @@ import time import hashlib + def find_earlier(session, query, when, who, which, where, doctype, dOrg): """Find earlier document pertaining to this user. 
return True if found""" - if 'aggs' in query: - del query['aggs'] - - rangeQuery = {'range': - { - which: { - 'from': 0, - 'to': time.time() - } - } - } - - query['query']['bool']['must'] = [ - rangeQuery, - { - 'term': { - 'organisation': dOrg - } - }, - { - 'term': { - where: who - } + if "aggs" in query: + del query["aggs"] - } - ] - query['size'] = 1 - query['sort'] = [{ which: 'asc' }] + rangeQuery = {"range": {which: {"from": 0, "to": time.time()}}} - res = session.DB.ES.search( - index=session.DB.dbname, - doc_type=doctype, - body = query - ) - if res['hits']['hits']: - doc = res['hits']['hits'][0]['_source'] + query["query"]["bool"]["must"] = [ + rangeQuery, + {"term": {"organisation": dOrg}}, + {"term": {where: who}}, + ] + query["size"] = 1 + query["sort"] = [{which: "asc"}] + + res = session.DB.ES.search(index=session.DB.dbname, doc_type=doctype, body=query) + if res["hits"]["hits"]: + doc = res["hits"]["hits"][0]["_source"] if doc[which] >= when: return [doc[which], doc] else: @@ -128,14 +106,12 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dOrg = session.user['defaultOrganisation'] or "apache" + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + dOrg = session.user["defaultOrganisation"] or "apache" # Keep track of all contributors, and newcomers contributors = [] @@ -144,170 +120,120 @@ def run(API, environ, indata, session): #################################################################### # Start by grabbing all contributors this period via terms agg # #################################################################### - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - - - + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span ############################ # CODE NEWTIMERS # ############################ - rangeKey = 'ts' - rangeQuery = {'range': - { - rangeKey: { - 'from': dateFrom, - 'to': dateTo - } - } - } + rangeKey = "ts" + rangeQuery = {"range": {rangeKey: {"from": dateFrom, "to": dateTo}}} query = { - 'query': { - 'bool': { - 'must': [ - rangeQuery, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } - } - - query['aggs'] = { - 'by_committer': { - 'terms': { - 'field': 'committer_email', - 'size': 500 - } - }, - 'by_author': { - 'terms': { - 'field': 'author_email', - 'size': 500 - } - } + "query": {"bool": {"must": [rangeQuery, {"term": {"organisation": dOrg}}]}} + } + + query["aggs"] = { + "by_committer": {"terms": {"field": "committer_email", "size": 500}}, + "by_author": {"terms": {"field": "author_email", "size": 500}}, } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) + index=session.DB.dbname, doc_type="code_commit", body=query + ) code_contributors = [] - for bucket in res['aggregations']['by_committer']['buckets']: - email = bucket['key'] + for bucket in res["aggregations"]["by_committer"]["buckets"]: + email = bucket["key"] if email not in code_contributors: code_contributors.append(email) - for bucket in res['aggregations']['by_author']['buckets']: - email = bucket['key'] + for bucket in res["aggregations"]["by_author"]["buckets"]: + email = bucket["key"] if email not in code_contributors: code_contributors.append(email) # Now, for each contributor, find if they have done anything before for email in code_contributors: - ea = find_earlier(session, query, dateFrom, email, 'ts', 'author_email', 'code_commit', dOrg) - ec = find_earlier(session, query, dateFrom, email, 'ts', 'committer_email', 'code_commit', dOrg) + ea = find_earlier( + session, query, dateFrom, email, "ts", "author_email", "code_commit", dOrg + ) + ec = find_earlier( + session, + query, + dateFrom, + email, + "ts", + "committer_email", + "code_commit", + dOrg, + ) if ea[0] != -1 and ec[0] != -1: earliest = ea if earliest[0] == -1 or (earliest[0] > ec[0] and ec[0] != -1): earliest = ec - newcomers[email] = { - 'code': earliest - } - - + newcomers[email] = {"code": earliest} ############################ # ISSUE NEWTIMERS # ############################ - rangeKey = 'created' - rangeQuery = {'range': - { - rangeKey: { - 'from': dateFrom, - 'to': dateTo - } - } - } + rangeKey = "created" + rangeQuery = {"range": {rangeKey: {"from": dateFrom, "to": dateTo}}} query = { - 'query': { - 'bool': { - 'must': [ - rangeQuery, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } - } - - query['aggs'] = { - 'by_creator': { - 'terms': { - 'field': 'issueCreator', - 'size': 500 - } - }, - 'by_closer': { - 'terms': { - 'field': 'issueCloser', - 'size': 500 - } - } + "query": {"bool": {"must": [rangeQuery, {"term": {"organisation": dOrg}}]}} + } + + query["aggs"] = { + "by_creator": {"terms": {"field": "issueCreator", "size": 500}}, + "by_closer": {"terms": {"field": "issueCloser", "size": 500}}, } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) - res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - body = query - ) + res = session.DB.ES.search(index=session.DB.dbname, doc_type="issue", body=query) issue_contributors = [] - for bucket in res['aggregations']['by_creator']['buckets']: - email = bucket['key'] + for bucket in res["aggregations"]["by_creator"]["buckets"]: + email = bucket["key"] if email not in issue_contributors: issue_contributors.append(email) - for bucket in res['aggregations']['by_closer']['buckets']: - email = bucket['key'] + for bucket in res["aggregations"]["by_closer"]["buckets"]: + email = bucket["key"] if email not in issue_contributors: issue_contributors.append(email) # Now, for each contributor, find if they have done anything before for email in issue_contributors: - ecr = find_earlier(session, query, dateFrom, email, 'created', 'issueCreator', 'issue', dOrg) - ecl = find_earlier(session, query, dateFrom, email, 'closed', 'issueCloser', 'issue', dOrg) + ecr = find_earlier( + session, query, dateFrom, email, "created", "issueCreator", "issue", dOrg + ) + ecl = find_earlier( + session, query, dateFrom, email, "closed", "issueCloser", "issue", dOrg + ) if ecr[0] != -1 and ecl[0] != -1: earliest = ecr if earliest[0] == -1 or (earliest[0] > ecl[0] and ecl[0] != -1): earliest = ecl newcomers[email] = newcomers.get(email, {}) - newcomers[email]['issue'] = earliest + newcomers[email]["issue"] = earliest email_contributors = [] @@ -316,12 +242,18 @@ def run(API, environ, indata, session): ################################ for email in newcomers: - pid = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('ascii', errors='replace')).hexdigest() + pid = hashlib.sha1( + ("%s%s" % (dOrg, email)).encode("ascii", errors="replace") + ).hexdigest() person = {} - if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id = pid): - person = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id = pid)['_source'] - person['md5'] = hashlib.md5(person['email'].encode('utf-8')).hexdigest() # gravatar needed for UI! - newcomers[email]['bio'] = person + if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=pid): + person = session.DB.ES.get( + index=session.DB.dbname, doc_type="person", id=pid + )["_source"] + person["md5"] = hashlib.md5( + person["email"].encode("utf-8") + ).hexdigest() # gravatar needed for UI! 
+ newcomers[email]["bio"] = person newcomers_code = [] newcomers_issues = [] @@ -329,30 +261,21 @@ def run(API, environ, indata, session): # Count newcomers in each category (TODO: put this elsewhere earlier) for email, entry in newcomers.items(): - if 'code' in entry: + if "code" in entry: newcomers_code.append(email) - if 'issue' in entry: + if "issue" in entry: newcomers_issues.append(email) - if 'email' in entry: + if "email" in entry: newcomers_email.append(email) JSON_OUT = { - 'okay': True, - 'stats': { - 'code': { - 'newcomers': newcomers_code, - 'seen': len(code_contributors), - }, - 'issues': { - 'newcomers': newcomers_issues, - 'seen': len(issue_contributors), - }, - 'email': { - 'newcomers': newcomers_email, - 'seen': len(email_contributors), - } + "okay": True, + "stats": { + "code": {"newcomers": newcomers_code, "seen": len(code_contributors)}, + "issues": {"newcomers": newcomers_issues, "seen": len(issue_contributors)}, + "email": {"newcomers": newcomers_email, "seen": len(email_contributors)}, }, - 'bios': newcomers, - 'responseTime': time.time() - now + "bios": newcomers, + "responseTime": time.time() - now, } - yield json.dumps(JSON_OUT, indent = 2) + yield json.dumps(JSON_OUT, indent=2) diff --git a/kibble/api/pages/bio/trends.py b/kibble/api/pages/bio/trends.py index 18b84b74..d8dffa66 100644 --- a/kibble/api/pages/bio/trends.py +++ b/kibble/api/pages/bio/trends.py @@ -61,9 +61,6 @@ ######################################################################## - - - """ This is the contributor trends renderer for Kibble """ @@ -71,6 +68,7 @@ import json import time + def run(API, environ, indata, session): # We need to be logged in for this! @@ -81,20 +79,20 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" #################################################################### # We start by doing all the queries for THIS period. # @@ -102,111 +100,61 @@ def run(API, environ, indata, session): # and rerun the same queries. # #################################################################### - rangeKey = 'created' - rangeQuery = {'range': - { - rangeKey: { - 'from': dateFrom, - 'to': dateTo - } - } - } + rangeKey = "created" + rangeQuery = {"range": {rangeKey: {"from": dateFrom, "to": dateTo}}} # ISSUES OPENED query = { - 'query': { - 'bool': { - 'must': [ - rangeQuery, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } - } + "query": {"bool": {"must": [rangeQuery, {"term": {"organisation": dOrg}}]}} + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - codeKey = 'committer_email' if not indata.get('author') else 'author_email' - query['query']['bool']['should'] = [ - {'term': {'issueCreator': indata.get('email')}}, - {'term': {'issueCloser': indata.get('email')}}, - {'term': {'sender': indata.get('email')}}, - {'term': {codeKey: indata.get('email')}}, + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + codeKey = "committer_email" if not indata.get("author") else "author_email" + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + {"term": {"sender": indata.get("email")}}, + {"term": {codeKey: indata.get("email")}}, ] - query['query']['bool']['minimum_should_match'] = 1 - + query["query"]["bool"]["minimum_should_match"] = 1 # ISSUES CREATED - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="issue", - body = query - ) - no_issues_created = res['count'] - + res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query) + no_issues_created = res["count"] # ISSUES CLOSED rangeKey = "closed" - query['query']['bool']['must'][0] = {'range': - { - rangeKey: { - 'from': dateFrom, - 'to': dateTo - } - } - } - - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="issue", - body = query - ) - no_issues_closed = res['count'] + query["query"]["bool"]["must"][0] = { + "range": {rangeKey: {"from": dateFrom, "to": dateTo}} + } + res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query) + no_issues_closed = res["count"] # EMAIL SENT rangeKey = "ts" - query['query']['bool']['must'][0] = {'range': - { - rangeKey: { - 'from': dateFrom, - 'to': dateTo - } - } - } + query["query"]["bool"]["must"][0] = { + "range": {rangeKey: {"from": dateFrom, "to": dateTo}} + } - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="email", - body = query - ) - no_email_sent = res['count'] + res = session.DB.ES.count(index=session.DB.dbname, doc_type="email", body=query) + no_email_sent = res["count"] # COMMITS MADE rangeKey = "ts" - query['query']['bool']['must'][0] = {'range': - { - rangeKey: { - 'from': dateFrom, - 'to': dateTo - } - } - } + query["query"]["bool"]["must"][0] = { + "range": {rangeKey: {"from": dateFrom, "to": dateTo}} + } res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) - no_commits = res['count'] - - + index=session.DB.dbname, doc_type="code_commit", body=query + ) + no_commits = res["count"] #################################################################### # Change to PRIOR SPAN # @@ -214,108 +162,64 @@ def run(API, environ, indata, session): # ISSUES OPENED rangeKey = "created" - query['query']['bool']['must'][0] = {'range': - { - rangeKey: { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - } - - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="issue", - body = query - ) - no_issues_created_before = res['count'] - + query["query"]["bool"]["must"][0] = { + "range": {rangeKey: {"from": dateYonder, "to": dateFrom - 1}} + } + res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query) + 
no_issues_created_before = res["count"] # ISSUES CLOSED rangeKey = "closed" - query['query']['bool']['must'][0] = {'range': - { - rangeKey: { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - } - - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="issue", - body = query - ) - no_issues_closed_before = res['count'] + query["query"]["bool"]["must"][0] = { + "range": {rangeKey: {"from": dateYonder, "to": dateFrom - 1}} + } + res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query) + no_issues_closed_before = res["count"] # EMAIL SENT rangeKey = "ts" - query['query']['bool']['must'][0] = {'range': - { - rangeKey: { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - } - + query["query"]["bool"]["must"][0] = { + "range": {rangeKey: {"from": dateYonder, "to": dateFrom - 1}} + } - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="email", - body = query - ) - no_email_sent_before = res['count'] + res = session.DB.ES.count(index=session.DB.dbname, doc_type="email", body=query) + no_email_sent_before = res["count"] # CODE COMMITS rangeKey = "ts" - query['query']['bool']['must'][0] = {'range': - { - rangeKey: { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - } - + query["query"]["bool"]["must"][0] = { + "range": {rangeKey: {"from": dateYonder, "to": dateFrom - 1}} + } res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) - no_commits_before = res['count'] - + index=session.DB.dbname, doc_type="code_commit", body=query + ) + no_commits_before = res["count"] trends = { "created": { - 'before': no_issues_created_before, - 'after': no_issues_created, - 'title': "Issues opened this period" + "before": no_issues_created_before, + "after": no_issues_created, + "title": "Issues opened this period", }, "closed": { - 'before': no_issues_closed_before, - 'after': no_issues_closed, - 'title': "Issues closed this period" + "before": no_issues_closed_before, + "after": no_issues_closed, + "title": "Issues closed this period", }, "email": { - 'before': no_email_sent_before, - 'after': no_email_sent, - 'title': "Emails sent this period" + "before": no_email_sent_before, + "after": no_email_sent, + "title": "Emails sent this period", }, "code": { - 'before': no_commits_before, - 'after': no_commits, - 'title': "Commits this period" - } + "before": no_commits_before, + "after": no_commits, + "title": "Commits this period", + }, } - JSON_OUT = { - 'trends': trends, - 'okay': True, - 'responseTime': time.time() - now - } + JSON_OUT = {"trends": trends, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/ci/queue.py b/kibble/api/pages/ci/queue.py index b64c6ccb..d539bee8 100644 --- a/kibble/api/pages/ci/queue.py +++ b/kibble/api/pages/ci/queue.py @@ -61,7 +61,6 @@ ######################################################################## - """ This is the CI queue timeseries renderer for Kibble """ @@ -70,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! 
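For reference, the before/after pairs emitted by trends.py above map directly onto a relative-change calculation on the consumer side. A sketch under the field names shown; the helper itself is not part of the API:

def relative_change(entry):
    # entry is one of the "created"/"closed"/"email"/"code" dicts above
    if entry["before"] == 0:
        return None  # no baseline period to compare against
    return (entry["after"] - entry["before"]) / entry["before"]
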
@@ -80,132 +80,98 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) # We only want build sources, so we can sum up later. - viewList = session.subType(['jenkins', 'travis', 'buildbot'], viewList) + viewList = session.subType(["jenkins", "travis", "buildbot"], viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - - interval = indata.get('interval', 'month') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'time': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"time": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - viewList = [indata.get('source')] + if indata.get("source"): + viewList = [indata.get("source")] - query['query']['bool']['must'].append({'term': {'sourceID': 'x'}}) + query["query"]["bool"]["must"].append({"term": {"sourceID": "x"}}) timeseries = [] for source in viewList: - query['query']['bool']['must'][2] = {'term': {'sourceID': source}} + query["query"]["bool"]["must"][2] = {"term": {"sourceID": source}} # Get queue stats - query['aggs'] = { - 'timeseries': { - 'date_histogram': { - 'field': 'date', - 'interval': interval - }, - 'aggs': { - 'size': { - 'avg': { - 'field': 'size' - } - }, - 'blocked': { - 'avg': { - 'field': 'blocked' - } - }, - 'building': { - 'avg': { - 'field': 'building' - } - }, - 'stuck': { - 'avg': { - 'field': 'stuck' - } - }, - 'wait': { - 'avg': { - 'field': 'avgwait' - } - } - } - } + query["aggs"] = { + "timeseries": { + "date_histogram": {"field": "date", "interval": interval}, + "aggs": { + "size": {"avg": {"field": "size"}}, + "blocked": {"avg": {"field": "blocked"}}, + "building": {"avg": {"field": "building"}}, + "stuck": {"avg": {"field": "stuck"}}, + "wait": {"avg": {"field": "avgwait"}}, + }, } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="ci_queue", - size = 0, - body = query - ) - - for bucket in res['aggregations']['timeseries']['buckets']: - ts = int(bucket['key'] / 1000) - bucket['wait']['value'] = bucket['wait'].get('value', 0) or 0 - if bucket['doc_count'] == 0: + index=session.DB.dbname, doc_type="ci_queue", size=0, body=query + ) + + for bucket in res["aggregations"]["timeseries"]["buckets"]: + ts = int(bucket["key"] / 1000) + bucket["wait"]["value"] = bucket["wait"].get("value", 0) or 0 + if bucket["doc_count"] == 0: continue found = False for t in timeseries: - if t['date'] == ts: + if t["date"] == ts: found = True - t['queue size'] += 
bucket['size']['value'] - t['builds running'] += bucket['building']['value'] - t['average wait (hours)'] += bucket['wait']['value'] - t['builders'] += 1 + t["queue size"] += bucket["size"]["value"] + t["builds running"] += bucket["building"]["value"] + t["average wait (hours)"] += bucket["wait"]["value"] + t["builders"] += 1 if not found: - timeseries.append({ - 'date': ts, - 'queue size': bucket['size']['value'], - 'builds running': bucket['building']['value'], - 'average wait (hours)': bucket['wait']['value'], - 'builders': 1, - }) + timeseries.append( + { + "date": ts, + "queue size": bucket["size"]["value"], + "builds running": bucket["building"]["value"], + "average wait (hours)": bucket["wait"]["value"], + "builders": 1, + } + ) for t in timeseries: - t['average wait (hours)'] = int(t['average wait (hours)']/360)/10.0 - del t['builders'] + t["average wait (hours)"] = int(t["average wait (hours)"] / 360) / 10.0 + del t["builders"] JSON_OUT = { - 'widgetType': { - 'chartType': 'line', # Recommendation for the UI - 'nofill': True + "widgetType": { + "chartType": "line", # Recommendation for the UI + "nofill": True, }, - 'timeseries': timeseries, - 'interval': interval, - 'okay': True, - 'responseTime': time.time() - now + "timeseries": timeseries, + "interval": interval, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/ci/status.py b/kibble/api/pages/ci/status.py index 1629bc6d..954c4612 100644 --- a/kibble/api/pages/ci/status.py +++ b/kibble/api/pages/ci/status.py @@ -61,7 +61,6 @@ ######################################################################## - """ This is the CI queue status (blocked/stuck) timeseries renderer for Kibble """ @@ -70,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! @@ -80,104 +80,73 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - - interval = indata.get('interval', 'month') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'time': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"time": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get queue stats - query['aggs'] = { - 'timeseries': { - 'date_histogram': { - 'field': 'date', - 'interval': interval - }, - 'aggs': { - 'size': { - 'avg': { - 'field': 'size' - } - }, - 'blocked': { - 'avg': { - 'field': 'blocked' - } - }, - 'stuck': { - 'avg': { - 'field': 'stuck' - } - }, - 'wait': { - 'avg': { - 'field': 'avgwait' - } - } - } - } + query["aggs"] = { + "timeseries": { + "date_histogram": {"field": "date", "interval": interval}, + "aggs": { + "size": {"avg": {"field": "size"}}, + "blocked": {"avg": {"field": "blocked"}}, + "stuck": {"avg": {"field": "stuck"}}, + "wait": {"avg": {"field": "avgwait"}}, + }, } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="ci_queue", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="ci_queue", size=0, body=query + ) timeseries = [] - for bucket in res['aggregations']['timeseries']['buckets']: - if bucket['doc_count'] == 0: + for bucket in res["aggregations"]["timeseries"]["buckets"]: + if bucket["doc_count"] == 0: continue - ts = int(bucket['key'] / 1000) - timeseries.append({ - 'date': ts, - 'builds blocked': bucket['blocked']['value'], - 'builds stuck': bucket['stuck']['value'] - }) + ts = int(bucket["key"] / 1000) + timeseries.append( + { + "date": ts, + "builds blocked": bucket["blocked"]["value"], + "builds stuck": bucket["stuck"]["value"], + } + ) JSON_OUT = { - 'widgetType': { - 'chartType': 'bar' # Recommendation for the UI - }, - 'timeseries': timeseries, - 'interval': interval, - 'okay': True, - 'responseTime': time.time() - now + "widgetType": {"chartType": "bar"}, # Recommendation for the UI + "timeseries": timeseries, + "interval": interval, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/ci/top-buildcount.py b/kibble/api/pages/ci/top-buildcount.py index 96e12cb4..52a59e90 100644 --- a/kibble/api/pages/ci/top-buildcount.py +++ b/kibble/api/pages/ci/top-buildcount.py @@ -61,9 +61,6 @@ ######################################################################## - - - """ This is the TopN CI jobs by total build time renderer for Kibble """ @@ -72,6 +69,7 @@ import time import re + def run(API, environ, indata, session): # We need to be logged in for this! 
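Both CI renderers divide each bucket key by 1000 before emitting it: Elasticsearch keys date_histogram buckets by epoch milliseconds, while the UI timeseries use epoch seconds. The conversion step, distilled into a sketch (the helper name is ours):

def buckets_to_timeseries(buckets):
    # Skip empty buckets and convert ES millisecond keys to seconds
    return [
        {"date": int(b["key"] / 1000), "count": b["doc_count"]}
        for b in buckets
        if b["doc_count"] > 0
    ]
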
@@ -82,96 +80,76 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'date': { - 'from': time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(dateFrom)), - 'to': time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(dateTo)) - } - } - }, - { - 'term': { - 'organisation': dOrg - } + "query": { + "bool": { + "must": [ + { + "range": { + "date": { + "from": time.strftime( + "%Y/%m/%d %H:%M:%S", time.gmtime(dateFrom) + ), + "to": time.strftime( + "%Y/%m/%d %H:%M:%S", time.gmtime(dateTo) + ), } - ] - } - } - } - # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) - elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - - query['aggs'] = { - 'by_job': { - 'terms': { - 'field': 'jobURL.keyword', - 'size': 5000, - }, - 'aggs': { - 'duration': { - 'sum': { - 'field': 'duration' - } - }, - 'ci': { - 'terms': { - 'field': 'ci.keyword', - 'size': 1 } }, - 'name': { - 'terms': { - 'field': 'job.keyword', - 'size': 1 - } - } - } + {"term": {"organisation": dOrg}}, + ] } } + } + # Source-specific or view-specific?? 
+ if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) + elif viewList: + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + + query["aggs"] = { + "by_job": { + "terms": {"field": "jobURL.keyword", "size": 5000}, + "aggs": { + "duration": {"sum": {"field": "duration"}}, + "ci": {"terms": {"field": "ci.keyword", "size": 1}}, + "name": {"terms": {"field": "job.keyword", "size": 1}}, + }, + } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="ci_build", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="ci_build", size=0, body=query + ) jobs = [] - for doc in res['aggregations']['by_job']['buckets']: - job = doc['key'] - builds = doc['doc_count'] - duration = doc['duration']['value'] - ci = doc['ci']['buckets'][0]['key'] - jobname = doc['name']['buckets'][0]['key'] + for doc in res["aggregations"]["by_job"]["buckets"]: + job = doc["key"] + builds = doc["doc_count"] + duration = doc["duration"]["value"] + ci = doc["ci"]["buckets"][0]["key"] + jobname = doc["name"]["buckets"][0]["key"] jobs.append([builds, duration, jobname, ci]) - topjobs = sorted(jobs, key = lambda x: int(x[0]), reverse = True) + topjobs = sorted(jobs, key=lambda x: int(x[0]), reverse=True) tophash = {} for v in topjobs: tophash["%s (%s)" % (v[2], v[3])] = v[0] - JSON_OUT = { - 'counts': tophash, - 'okay': True, - 'responseTime': time.time() - now, - } + JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/ci/top-buildtime.py b/kibble/api/pages/ci/top-buildtime.py index a9481ee4..f3a8c800 100644 --- a/kibble/api/pages/ci/top-buildtime.py +++ b/kibble/api/pages/ci/top-buildtime.py @@ -61,9 +61,6 @@ ######################################################################## - - - """ This is the TopN CI jobs by total build time renderer for Kibble """ @@ -72,6 +69,7 @@ import time import re + def run(API, environ, indata, session): # We need to be logged in for this! 
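One subtlety in the tophash built above: the jobs are inserted into a plain dict after sorting, so the TopN ordering only survives json.dumps on interpreters where dicts preserve insertion order (guaranteed from Python 3.7). If older interpreters matter, an OrderedDict would make the intent explicit; a sketch, not part of the patch:

from collections import OrderedDict

def top_hash(jobs):
    # jobs are [builds, duration, jobname, ci] entries as collected above
    ordered = sorted(jobs, key=lambda x: int(x[0]), reverse=True)
    return OrderedDict(("%s (%s)" % (j[2], j[3]), j[0]) for j in ordered)
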
@@ -82,103 +80,83 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'date': { - 'from': time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(dateFrom)), - 'to': time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(dateTo)) - } - } - }, - { - 'term': { - 'organisation': dOrg - } + "query": { + "bool": { + "must": [ + { + "range": { + "date": { + "from": time.strftime( + "%Y/%m/%d %H:%M:%S", time.gmtime(dateFrom) + ), + "to": time.strftime( + "%Y/%m/%d %H:%M:%S", time.gmtime(dateTo) + ), } - ] - } - } - } - # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) - elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - - query['aggs'] = { - 'by_job': { - 'terms': { - 'field': 'jobURL.keyword', - 'size': 5000, - }, - 'aggs': { - 'duration': { - 'sum': { - 'field': 'duration' - } - }, - 'ci': { - 'terms': { - 'field': 'ci.keyword', - 'size': 1 } }, - 'name': { - 'terms': { - 'field': 'job.keyword', - 'size': 1 - } - } - } + {"term": {"organisation": dOrg}}, + ] } } + } + # Source-specific or view-specific?? 
+ if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) + elif viewList: + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + + query["aggs"] = { + "by_job": { + "terms": {"field": "jobURL.keyword", "size": 5000}, + "aggs": { + "duration": {"sum": {"field": "duration"}}, + "ci": {"terms": {"field": "ci.keyword", "size": 1}}, + "name": {"terms": {"field": "job.keyword", "size": 1}}, + }, + } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="ci_build", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="ci_build", size=0, body=query + ) jobs = [] - for doc in res['aggregations']['by_job']['buckets']: - job = doc['key'] - builds = doc['doc_count'] - duration = doc['duration']['value'] - ci = doc['ci']['buckets'][0]['key'] - jobname = doc['name']['buckets'][0]['key'] + for doc in res["aggregations"]["by_job"]["buckets"]: + job = doc["key"] + builds = doc["doc_count"] + duration = doc["duration"]["value"] + ci = doc["ci"]["buckets"][0]["key"] + jobname = doc["name"]["buckets"][0]["key"] jobs.append([builds, duration, jobname, ci]) - topjobs = sorted(jobs, key = lambda x: int(x[1]), reverse = True) + topjobs = sorted(jobs, key=lambda x: int(x[1]), reverse=True) top = topjobs[0:24] if len(topjobs) > 25: count = 0 for repo in topjobs[24:]: count += repo[1] - top.append([1, count, "Other jobs", '??']) + top.append([1, count, "Other jobs", "??"]) tophash = {} for v in top: - tophash["%s (%s)" % (v[2], v[3])] = int((v[1]/360000))/10 + tophash["%s (%s)" % (v[2], v[3])] = int((v[1] / 360000)) / 10 - JSON_OUT = { - 'counts': tophash, - 'okay': True, - 'responseTime': time.time() - now, - } + JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/changes.py b/kibble/api/pages/code/changes.py index 6365d537..83fd305d 100644 --- a/kibble/api/pages/code/changes.py +++ b/kibble/api/pages/code/changes.py @@ -61,9 +61,6 @@ ######################################################################## - - - """ This is the code changes timeseries renderer for Kibble """ @@ -71,6 +68,7 @@ import json import time + def run(API, environ, indata, session): # We need to be logged in for this! 
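The int((v[1] / 360000)) / 10 step above is easiest to read as a milliseconds-to-hours conversion that keeps one decimal place, assuming duration is summed in milliseconds (which the factor suggests; the queue renderer's /360 variant would be the seconds equivalent). Worked out:

def ms_to_hours_one_decimal(ms):
    # 3,600,000 ms per hour; /360000 then /10 truncates to tenths of an hour
    return int(ms / 360000) / 10

# e.g. 5,400,000 ms -> 1.5 hours
assert ms_to_hours_one_decimal(5400000) == 1.5
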
@@ -81,114 +79,90 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - - which = 'committer_email' - role = 'committer' - if indata.get('author', False): - which = 'author_email' - role = 'author' - - interval = indata.get('interval', 'day') + which = "committer_email" + role = "committer" + if indata.get("author", False): + which = "author_email" + role = "author" + interval = indata.get("interval", "day") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"committer_email": indata.get("email")}}, + {"term": {"author_email": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Path filter? - if indata.get('pathfilter'): - pf = indata.get('pathfilter') - if '!' in pf: - pf = pf.replace('!', '') - query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', []) - query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) + if indata.get("pathfilter"): + pf = indata.get("pathfilter") + if "!" 
in pf: + pf = pf.replace("!", "") + query["query"]["bool"]["must_not"] = query["query"]["bool"].get( + "must_not", [] + ) + query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}}) else: - query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) + query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}}) # Get timeseries for this period - query['aggs'] = { - 'per_interval': { - 'date_histogram': { - 'field': 'date', - 'interval': interval - }, - 'aggs': { - 'insertions': { - 'sum': { - 'field': 'insertions' - } - }, - 'deletions': { - 'sum': { - 'field': 'deletions' - } - } - } - } + query["aggs"] = { + "per_interval": { + "date_histogram": {"field": "date", "interval": interval}, + "aggs": { + "insertions": {"sum": {"field": "insertions"}}, + "deletions": {"sum": {"field": "deletions"}}, + }, } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) timeseries = [] - for bucket in res['aggregations']['per_interval']['buckets']: - ts = int(bucket['key'] / 1000) - icount = bucket['insertions']['value'] - dcount = bucket['deletions']['value'] - timeseries.append({ - 'date': ts, - 'insertions': icount, - 'deletions': dcount - }) + for bucket in res["aggregations"]["per_interval"]["buckets"]: + ts = int(bucket["key"] / 1000) + icount = bucket["insertions"]["value"] + dcount = bucket["deletions"]["value"] + timeseries.append({"date": ts, "insertions": icount, "deletions": dcount}) JSON_OUT = { - 'timeseries': timeseries, - 'interval': interval, - 'okay': True, - 'responseTime': time.time() - now, - 'widgetType': { - 'chartType': 'area' - } + "timeseries": timeseries, + "interval": interval, + "okay": True, + "responseTime": time.time() - now, + "widgetType": {"chartType": "area"}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/commits.py b/kibble/api/pages/code/commits.py index 2b450387..c5377df5 100644 --- a/kibble/api/pages/code/commits.py +++ b/kibble/api/pages/code/commits.py @@ -61,9 +61,6 @@ ######################################################################## - - - """ This is the TopN committers list renderer for Kibble """ @@ -72,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! 
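The pathfilter convention in changes.py, repeated in the commit renderers below, treats a "!" anywhere in the filter as negation: the regexp is stripped of the "!" and moved from the bool query's must clause into must_not. Restated as a standalone sketch over the same field names:

def apply_pathfilter(bool_query, pf):
    # "!" negates the regexp match on files_changed
    if "!" in pf:
        bool_query.setdefault("must_not", []).append(
            {"regexp": {"files_changed": pf.replace("!", "")}}
        )
    else:
        bool_query["must"].append({"regexp": {"files_changed": pf}})
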
@@ -82,99 +80,82 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - - which = 'committer_email' - role = 'committer' - if indata.get('author', False): - which = 'author_email' - role = 'author' - - interval = indata.get('interval', 'day') + which = "committer_email" + role = "committer" + if indata.get("author", False): + which = "author_email" + role = "author" + interval = indata.get("interval", "day") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"committer_email": indata.get("email")}}, + {"term": {"author_email": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Path filter? - if indata.get('pathfilter'): - pf = indata.get('pathfilter') - if '!' in pf: - pf = pf.replace('!', '') - query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', []) - query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) + if indata.get("pathfilter"): + pf = indata.get("pathfilter") + if "!" 
in pf: + pf = pf.replace("!", "") + query["query"]["bool"]["must_not"] = query["query"]["bool"].get( + "must_not", [] + ) + query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}}) else: - query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) + query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}}) # Get number of committers, this period - query['aggs'] = { - 'commits': { - 'date_histogram': { - 'field': 'date', - 'interval': interval - } - } - } + query["aggs"] = { + "commits": {"date_histogram": {"field": "date", "interval": interval}} + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) timeseries = [] - for bucket in res['aggregations']['commits']['buckets']: - ts = int(bucket['key'] / 1000) - count = bucket['doc_count'] - timeseries.append({ - 'date': ts, - 'commits': count - }) + for bucket in res["aggregations"]["commits"]["buckets"]: + ts = int(bucket["key"] / 1000) + count = bucket["doc_count"] + timeseries.append({"date": ts, "commits": count}) JSON_OUT = { - 'widgetType': { - 'chartType': 'bar' # Recommendation for the UI - }, - 'timeseries': timeseries, - 'interval': interval, - 'okay': True, - 'responseTime': time.time() - now + "widgetType": {"chartType": "bar"}, # Recommendation for the UI + "timeseries": timeseries, + "interval": interval, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/committers.py b/kibble/api/pages/code/committers.py index b48ae256..4bc0d36d 100644 --- a/kibble/api/pages/code/committers.py +++ b/kibble/api/pages/code/committers.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN committers list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! 
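Several endpoints in this patch derive the person-document ID the same way: a SHA1 over the organisation name concatenated with the e-mail address. The encodings differ slightly between files (the bio pages use ascii with errors="replace", the committers renderer below uses utf-8), so non-ASCII addresses could hash to different IDs; a shared helper along these lines (hypothetical) would pin one canonical form:

import hashlib

def person_id(org, email):
    # Mirrors the bio.py variant: ascii with replacement for odd bytes
    return hashlib.sha1(
        ("%s%s" % (org, email)).encode("ascii", errors="replace")
    ).hexdigest()
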
@@ -83,187 +80,137 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - which = 'committer_email' - role = 'committer' - if indata.get('author', False): - which = 'author_email' - role = 'author' - - interval = indata.get('interval', 'month') + which = "committer_email" + role = "committer" + if indata.get("author", False): + which = "author_email" + role = "author" + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"committer_email": indata.get("email")}}, + {"term": {"author_email": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Path filter? - if indata.get('pathfilter'): - pf = indata.get('pathfilter') - if '!' in pf: - pf = pf.replace('!', '') - query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', []) - query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) + if indata.get("pathfilter"): + pf = indata.get("pathfilter") + if "!" 
in pf: + pf = pf.replace("!", "") + query["query"]["bool"]["must_not"] = query["query"]["bool"].get( + "must_not", [] + ) + query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}}) else: - query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) + query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}}) # Get top 25 committers this period - query['aggs'] = { - 'committers': { - 'terms': { - 'field': which, - 'size': 25 - }, - 'aggs': { - 'byinsertions': { - 'terms': { - 'field': which - }, - 'aggs': { - 'stats': { - 'sum': { - 'field': "insertions" - } - } - } + query["aggs"] = { + "committers": { + "terms": {"field": which, "size": 25}, + "aggs": { + "byinsertions": { + "terms": {"field": which}, + "aggs": {"stats": {"sum": {"field": "insertions"}}}, }, - 'bydeletions': { - 'terms': { - 'field': which - }, - 'aggs': { - 'stats': { - 'sum': { - 'field': "deletions" - } - } - } + "bydeletions": { + "terms": {"field": which}, + "aggs": {"stats": {"sum": {"field": "deletions"}}}, }, - } }, - } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) people = {} - for bucket in res['aggregations']['committers']['buckets']: - email = bucket['key'] - count = bucket['doc_count'] - sha = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('utf-8') ).hexdigest() - if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha): - pres = session.DB.ES.get( - index=session.DB.dbname, - doc_type="person", - id = sha - ) - person = pres['_source'] - person['name'] = person.get('name', 'unknown') + for bucket in res["aggregations"]["committers"]["buckets"]: + email = bucket["key"] + count = bucket["doc_count"] + sha = hashlib.sha1(("%s%s" % (dOrg, email)).encode("utf-8")).hexdigest() + if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha): + pres = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id=sha) + person = pres["_source"] + person["name"] = person.get("name", "unknown") people[email] = person - people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest() - people[email]['count'] = count - people[email]['subcount'] = { - 'insertions': int(bucket['byinsertions']['buckets'][0]['stats']['value']), - 'deletions': int(bucket['bydeletions']['buckets'][0]['stats']['value']) + people[email]["gravatar"] = hashlib.md5( + person.get("email", "unknown").encode("utf-8") + ).hexdigest() + people[email]["count"] = count + people[email]["subcount"] = { + "insertions": int( + bucket["byinsertions"]["buckets"][0]["stats"]["value"] + ), + "deletions": int(bucket["bydeletions"]["buckets"][0]["stats"]["value"]), } topN = [] for email, person in people.items(): topN.append(person) - topN = sorted(topN, key = lambda x: x['count'], reverse = True) + topN = sorted(topN, key=lambda x: x["count"], reverse=True) # Get timeseries for this period - query['aggs'] = { - 'per_interval': { - 'date_histogram': { - 'field': 'date', - 'interval': interval - }, - 'aggs': { - 'by_committer': { - 'cardinality': { - 'field': 'committer_email' - } - }, - 'by_author': { - 'cardinality': { - 'field': 'author_email' - } - } - } - } + query["aggs"] = { + "per_interval": { + "date_histogram": {"field": "date", "interval": interval}, + "aggs": { + "by_committer": {"cardinality": {"field": "committer_email"}}, + "by_author": {"cardinality": {"field": "author_email"}}, + }, } + } res 
= session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) timeseries = [] - for bucket in res['aggregations']['per_interval']['buckets']: - ts = int(bucket['key'] / 1000) - ccount = bucket['by_committer']['value'] - acount = bucket['by_author']['value'] - timeseries.append({ - 'date': ts, - 'committers': ccount, - 'authors': acount - }) + for bucket in res["aggregations"]["per_interval"]["buckets"]: + ts = int(bucket["key"] / 1000) + ccount = bucket["by_committer"]["value"] + acount = bucket["by_author"]["value"] + timeseries.append({"date": ts, "committers": ccount, "authors": acount}) JSON_OUT = { - 'topN': { - 'denoter': 'commits', - 'items': topN - }, - 'timeseries': timeseries, - 'sorted': people, - 'okay': True, - 'responseTime': time.time() - now, - 'widgetType': { - 'chartType': 'bar' - } + "topN": {"denoter": "commits", "items": topN}, + "timeseries": timeseries, + "sorted": people, + "okay": True, + "responseTime": time.time() - now, + "widgetType": {"chartType": "bar"}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/evolution.py b/kibble/api/pages/code/evolution.py index dd33c123..0fa9cdc5 100644 --- a/kibble/api/pages/code/evolution.py +++ b/kibble/api/pages/code/evolution.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN committers list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! @@ -83,94 +80,80 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) breakdown = False onlycode = False - #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'time': { - 'from': 0, - 'to': int(time.time()) - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"time": {"from": 0, "to": int(time.time())}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # We need scrolling here! 
res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="evolution", - scroll = '2m', - size = 5000, - body = query - ) - sid = res['_scroll_id'] - scroll_size = res['hits']['total'] + index=session.DB.dbname, + doc_type="evolution", + scroll="2m", + size=5000, + body=query, + ) + sid = res["_scroll_id"] + scroll_size = res["hits"]["total"] if type(scroll_size) is dict: - scroll_size = scroll_size['value'] # ES >= 7.x + scroll_size = scroll_size["value"] # ES >= 7.x timeseries = [] tstmp = {} - while (scroll_size > 0): - for doc in res['hits']['hits']: - updates = doc['_source'] - ts = updates['time'] #round(updates['time']/86400) * 86400 - if updates['time'] % 86400 != 0: + while scroll_size > 0: + for doc in res["hits"]["hits"]: + updates = doc["_source"] + ts = updates["time"] # round(updates['time']/86400) * 86400 + if updates["time"] % 86400 != 0: continue tstmp[ts] = tstmp.get(ts, {}) item = tstmp[ts] if breakdown: pass else: - item['code'] = item.get('code', 0) + (updates['loc'] or 0) - item['comments'] = item.get('comments', 0) + (updates['comments'] or 0) - item['blanks'] = item.get('blanks', 0) + (updates['blank'] or 0) + item["code"] = item.get("code", 0) + (updates["loc"] or 0) + item["comments"] = item.get("comments", 0) + (updates["comments"] or 0) + item["blanks"] = item.get("blanks", 0) + (updates["blank"] or 0) - res = session.DB.ES.scroll(scroll_id = sid, scroll = '1m') - sid = res['_scroll_id'] - scroll_size = len(res['hits']['hits']) + res = session.DB.ES.scroll(scroll_id=sid, scroll="1m") + sid = res["_scroll_id"] + scroll_size = len(res["hits"]["hits"]) for k, v in tstmp.items(): - v['date'] = k + v["date"] = k timeseries.append(v) - timeseries = sorted(timeseries, key = lambda x: x['date']) + timeseries = sorted(timeseries, key=lambda x: x["date"]) JSON_OUT = { - 'widgetType': { - 'chartType': 'line', # Recommendation for the UI - 'stack': True - }, - 'timeseries': timeseries, - 'sortOrder': ['code', 'comments', 'blanks'], - 'okay': True, - 'responseTime': time.time() - now + "widgetType": {"chartType": "line", "stack": True}, # Recommendation for the UI + "timeseries": timeseries, + "sortOrder": ["code", "comments", "blanks"], + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/pony-timeseries.py b/kibble/api/pages/code/pony-timeseries.py index 2a27c009..8e1d254e 100644 --- a/kibble/api/pages/code/pony-timeseries.py +++ b/kibble/api/pages/code/pony-timeseries.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the pony factor renderer for Kibble """ @@ -75,6 +71,7 @@ import datetime import dateutil.relativedelta + def run(API, environ, indata, session): # We need to be logged in for this! 
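
The scroll loop in evolution.py above is the stock elasticsearch-py pagination pattern, including the ES >= 7.x quirk where hits.total becomes an object. A condensed, self-contained sketch under the same assumptions (local node, "kibble" index; the match_all query is a placeholder):

    from elasticsearch import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])  # assumed local node
    res = es.search(
        index="kibble",  # assumed index name
        doc_type="evolution",
        scroll="2m",
        size=5000,
        body={"query": {"match_all": {}}},  # placeholder query
    )
    sid = res["_scroll_id"]
    scroll_size = res["hits"]["total"]
    if isinstance(scroll_size, dict):  # ES >= 7.x wraps the total in an object
        scroll_size = scroll_size["value"]
    while scroll_size > 0:
        for doc in res["hits"]["hits"]:
            print(doc["_source"].get("time"))
        # Each scroll call returns the next page and refreshes the cursor.
        res = es.scroll(scroll_id=sid, scroll="1m")
        sid = res["_scroll_id"]
        scroll_size = len(res["hits"]["hits"])
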
@@ -85,13 +82,12 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - hl = indata.get('span', 24) + hl = indata.get("span", 24) tnow = datetime.date.today() nm = tnow.month - (tnow.month % 3) ny = tnow.year @@ -111,107 +107,84 @@ def run(API, environ, indata, session): nm += 12 ny = ny - 1 - #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': tf, - 'to': t - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": tf, "to": t}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get an initial count of commits res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) + index=session.DB.dbname, doc_type="code_commit", body=query + ) - globcount = res['count'] + globcount = res["count"] if globcount == 0: break # Get top 25 committers this period - query['aggs'] = { - 'by_committer': { - 'terms': { - 'field': 'committer_email', - 'size': 1000 - } - }, - 'by_author': { - 'terms': { - 'field': 'author_email', - 'size': 1000 - } - } - } + query["aggs"] = { + "by_committer": {"terms": {"field": "committer_email", "size": 1000}}, + "by_author": {"terms": {"field": "author_email", "size": 1000}}, + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) # PF for committers pf_committer = 0 pf_committer_count = 0 - for bucket in res['aggregations']['by_committer']['buckets']: - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_committer"]["buckets"]: + count = bucket["doc_count"] pf_committer += 1 pf_committer_count += count - if pf_committer_count > int(globcount/2): + if pf_committer_count > int(globcount / 2): break # PF for authors pf_author = 0 pf_author_count = 0 cpf = {} - for bucket in res['aggregations']['by_author']['buckets']: - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_author"]["buckets"]: + count = bucket["doc_count"] pf_author += 1 pf_author_count += count - if '@' in bucket['key']: - mldom = bucket['key'].lower().split('@')[-1] + if "@" in bucket["key"]: + mldom = bucket["key"].lower().split("@")[-1] cpf[mldom] = True - if pf_author_count > int(globcount/2): + if pf_author_count > int(globcount / 2): break - ts.append({ - 'date': t, - 'Pony Factor (committership)': 
pf_committer,
-            'Pony Factor (authorship)': pf_author,
-            'Meta-Pony Factor': len(cpf)
-        })
+        ts.append(
+            {
+                "date": t,
+                "Pony Factor (committership)": pf_committer,
+                "Pony Factor (authorship)": pf_author,
+                "Meta-Pony Factor": len(cpf),
+            }
+        )
 
-    ts = sorted(ts, key = lambda x: x['date'])
+    ts = sorted(ts, key=lambda x: x["date"])
 
     JSON_OUT = {
-        'text': "This shows Pony Factors as calculated over a %u month timespan. Authorship measures the people writing the bulk of the codebase, committership mesaures the people committing (merging) the code, and meta-pony is an estimation of how many organisations/companies are involved." % hl,
-        'timeseries': ts,
-        'okay': True,
-        'responseTime': time.time() - now,
+        "text": "This shows Pony Factors as calculated over a %u month timespan. Authorship measures the people writing the bulk of the codebase, committership measures the people committing (merging) the code, and meta-pony is an estimation of how many organisations/companies are involved."
+        % hl,
+        "timeseries": ts,
+        "okay": True,
+        "responseTime": time.time() - now,
    }
    yield json.dumps(JSON_OUT)
diff --git a/kibble/api/pages/code/pony.py b/kibble/api/pages/code/pony.py
index 895a17af..2c5b48d2 100644
--- a/kibble/api/pages/code/pony.py
+++ b/kibble/api/pages/code/pony.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements. See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
-
-
-
 """
 This is the pony factor renderer for Kibble
 """
@@ -73,6 +69,7 @@
 import time
 import re
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,209 +80,158 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*24)) # Default to a 24 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 24)
+    )  # Default to a 24 month span
 
     if dateFrom < 0:
         dateFrom = 0
     dateYonder = dateFrom - (dateTo - dateFrom)
 
-
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
 
    # Source-specific or view-specific??
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get an initial count of commits res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) + index=session.DB.dbname, doc_type="code_commit", body=query + ) - globcount = res['count'] + globcount = res["count"] # Get top 25 committers this period - query['aggs'] = { - 'by_committer': { - 'terms': { - 'field': 'committer_email', - 'size': 5000 - } - }, - 'by_author': { - 'terms': { - 'field': 'author_email', - 'size': 5000 - } - } - } + query["aggs"] = { + "by_committer": {"terms": {"field": "committer_email", "size": 5000}}, + "by_author": {"terms": {"field": "author_email", "size": 5000}}, + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) # PF for committers pf_committer = 0 pf_committer_count = 0 - for bucket in res['aggregations']['by_committer']['buckets']: - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_committer"]["buckets"]: + count = bucket["doc_count"] pf_committer += 1 pf_committer_count += count - if pf_committer_count > int(globcount/2): + if pf_committer_count > int(globcount / 2): break # PF for authors pf_author = 0 pf_author_count = 0 cpf = {} - for bucket in res['aggregations']['by_author']['buckets']: - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_author"]["buckets"]: + count = bucket["doc_count"] pf_author += 1 pf_author_count += count - mldom = bucket['key'].lower().split('@')[1] + mldom = bucket["key"].lower().split("@")[1] cpf[mldom] = True - if pf_author_count > int(globcount/2): + if pf_author_count > int(globcount / 2): break - #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateYonder, "to": dateFrom - 1}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get an initial count of commits res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) + index=session.DB.dbname, doc_type="code_commit", body=query + ) - globcount = res['count'] + globcount = res["count"] # Get top 25 committers this period - query['aggs'] = { - 'by_committer': { - 'terms': { - 'field': 'committer_email', - 'size': 5000 - } - }, - 'by_author': { - 'terms': { - 'field': 'author_email', - 'size': 5000 - } - } - } + query["aggs"] = { + "by_committer": {"terms": {"field": "committer_email", "size": 5000}}, + "by_author": {"terms": {"field": "author_email", "size": 5000}}, + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) # PF for committers pf_committer_b = 0 pf_committer_count = 0 - for bucket in res['aggregations']['by_committer']['buckets']: - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_committer"]["buckets"]: + count = bucket["doc_count"] pf_committer_b += 1 pf_committer_count += count - if pf_committer_count > int(globcount/2): + if pf_committer_count > int(globcount / 2): break # PF for authors pf_author_b = 0 pf_author_count = 0 cpf_b = {} - for bucket in res['aggregations']['by_author']['buckets']: - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_author"]["buckets"]: + count = bucket["doc_count"] pf_author_b += 1 pf_author_count += count - mldom = bucket['key'].lower().split('@')[1] + mldom = bucket["key"].lower().split("@")[1] cpf_b[mldom] = True - if pf_author_count > int(globcount/2): + if pf_author_count > int(globcount / 2): break JSON_OUT = { - 'factors': [ + "factors": [ { - 'title': "Pony Factor (by committership)", - 'count': pf_committer, - 'previous': pf_committer_b + "title": "Pony Factor (by committership)", + "count": pf_committer, + "previous": pf_committer_b, }, { - 'title': "Pony Factor (by authorship)", - 'count': pf_author, - 'previous': pf_author_b + "title": "Pony Factor (by authorship)", + "count": pf_author, + "previous": pf_author_b, }, { - 'title': "Meta-Pony Factor (by authorship)", - 'count': len(cpf), - 'previous': len(cpf_b) - } + "title": "Meta-Pony Factor (by authorship)", + "count": len(cpf), + "previous": len(cpf_b), + }, ], - 'okay': True, - 'responseTime': time.time() - now, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/punchcard.py b/kibble/api/pages/code/punchcard.py index babbaeba..f588b3b8 100644 --- a/kibble/api/pages/code/punchcard.py +++ b/kibble/api/pages/code/punchcard.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the commit punch-card renderer for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! @@ -83,97 +80,84 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - which = 'committer_email' - role = 'committer' - if indata.get('author', False): - which = 'author_email' - role = 'author' + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - interval = indata.get('interval', 'day') + which = "committer_email" + role = "committer" + if indata.get("author", False): + which = "author_email" + role = "author" + interval = indata.get("interval", "day") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"committer_email": indata.get("email")}}, + {"term": {"author_email": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Path filter? - if indata.get('pathfilter'): - pf = indata.get('pathfilter') - if '!' in pf: - pf = pf.replace('!', '') - query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', []) - query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) + if indata.get("pathfilter"): + pf = indata.get("pathfilter") + if "!" 
in pf: + pf = pf.replace("!", "") + query["query"]["bool"]["must_not"] = query["query"]["bool"].get( + "must_not", [] + ) + query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}}) else: - query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) + query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}}) # Get number of committers, this period - query['aggs'] = { - 'commits': { - 'date_histogram': { - 'field': 'date', - 'interval': 'hour', - "format": "E - k" - } - } + query["aggs"] = { + "commits": { + "date_histogram": {"field": "date", "interval": "hour", "format": "E - k"} } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) timeseries = {} - for bucket in res['aggregations']['commits']['buckets']: - ts = bucket['key_as_string'] - count = bucket['doc_count'] + for bucket in res["aggregations"]["commits"]["buckets"]: + ts = bucket["key_as_string"] + count = bucket["doc_count"] timeseries[ts] = timeseries.get(ts, 0) + count JSON_OUT = { - 'widgetType': { - 'chartType': 'punchcard' # Recommendation for the UI - }, - 'timeseries': timeseries, - 'interval': interval, - 'okay': True, - 'responseTime': time.time() - now + "widgetType": {"chartType": "punchcard"}, # Recommendation for the UI + "timeseries": timeseries, + "interval": interval, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/relationships.py b/kibble/api/pages/code/relationships.py index f722b7ce..43b1a9e3 100644 --- a/kibble/api/pages/code/relationships.py +++ b/kibble/api/pages/code/relationships.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the committer relationship list renderer for Kibble """ @@ -76,6 +72,7 @@ import re import math + def run(API, environ, indata, session): # We need to be logged in for this! 
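
The punchcard endpoint above keys its histogram with the Joda-time pattern "E - k" (day of week, then hour of day), so buckets arrive pre-labelled and only need summing per key. A small sketch of that fold, with made-up bucket data standing in for res["aggregations"]["commits"]["buckets"]:

    # Sample buckets in the shape ES returns for the "E - k" format (illustrative).
    buckets = [
        {"key_as_string": "Mon - 14", "doc_count": 3},
        {"key_as_string": "Mon - 14", "doc_count": 2},
        {"key_as_string": "Tue - 9", "doc_count": 5},
    ]
    timeseries = {}
    for bucket in buckets:
        ts = bucket["key_as_string"]
        # Multiple hourly buckets share a weekday/hour cell, so counts accumulate.
        timeseries[ts] = timeseries.get(ts, 0) + bucket["doc_count"]
    print(timeseries)  # {'Mon - 14': 5, 'Tue - 9': 5}
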
@@ -86,71 +83,61 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - which = 'committer_email' - role = 'committer' - if indata.get('author', False): - which = 'author_email' - role = 'author' - - interval = indata.get('interval', 'day') + which = "committer_email" + role = "committer" + if indata.get("author", False): + which = "author_email" + role = "author" + interval = indata.get("interval", "day") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'committer_email' if not indata.get('author') else 'author_email': indata.get('email')}}) - - # Get number of commits, this period, per repo - query['aggs'] = { - 'per_repo': { - 'terms': { - 'field': 'sourceID', - 'size': 10000 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append( + { + "term": { + "committer_email" + if not indata.get("author") + else "author_email": indata.get("email") } } - } - res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query ) + # Get number of commits, this period, per repo + query["aggs"] = {"per_repo": {"terms": {"field": "sourceID", "size": 10000}}} + res = session.DB.ES.search( + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) + repos = {} repo_commits = {} authorlinks = {} @@ -158,33 +145,32 @@ def run(API, environ, indata, session): max_links = 0 max_shared = 0 max_authors = 0 - minLinks = indata.get('links', 1) + minLinks = indata.get("links", 1) # For each repo, count commits and gather data on authors - for doc in res['aggregations']['per_repo']['buckets']: - sourceID = doc['key'] - commits = doc['doc_count'] + for doc in res["aggregations"]["per_repo"]["buckets"]: + sourceID = doc["key"] + commits = doc["doc_count"] # Gather the unique authors/committers - query['aggs'] = { - 'per_contributor': { - 'terms': { - 'field': 
'committer_email' if not indata.get('author') else 'author_email',
-                    'size': 10000
+        query["aggs"] = {
+            "per_contributor": {
+                "terms": {
+                    "field": "committer_email"
+                    if not indata.get("author")
+                    else "author_email",
+                    "size": 10000,
                 }
             }
         }
         xquery = copy.deepcopy(query)
-        xquery['query']['bool']['must'].append({'term': {'sourceID': sourceID}})
+        xquery["query"]["bool"]["must"].append({"term": {"sourceID": sourceID}})
         xres = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = xquery
+            index=session.DB.dbname, doc_type="code_commit", size=0, body=xquery
         )
         authors = []
-        for person in xres['aggregations']['per_contributor']['buckets']:
-            authors.append(person['key'])
+        for person in xres["aggregations"]["per_contributor"]["buckets"]:
+            authors.append(person["key"])
         if commits > max_commits:
             max_commits = commits
         repos[sourceID] = authors
@@ -199,9 +185,11 @@ def run(API, environ, indata, session):
 
     # Grab data of all sources
     for ID, repo in repos.items():
         mylinks = {}
-        if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID):
+        if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id=ID):
             continue
-        repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID)
+        repodatas[ID] = session.DB.ES.get(
+            index=session.DB.dbname, doc_type="source", id=ID
+        )
 
     for ID, repo in repos.items():
         mylinks = {}
@@ -209,49 +197,59 @@ def run(API, environ, indata, session):
             continue
         repodata = repodatas[ID]
         oID = ID
-        if indata.get('collapse'):
-            m = re.search(indata.get('collapse'), repodata['_source']['sourceURL'])
+        if indata.get("collapse"):
+            m = re.search(indata.get("collapse"), repodata["_source"]["sourceURL"])
             if m:
                 ID = m.group(1)
         else:
-            ID = re.sub(r"^.+/", "", repodata['_source']['sourceURL'])
+            ID = re.sub(r"^.+/", "", repodata["_source"]["sourceURL"])
         for xID, xrepo in repos.items():
             if xID in repodatas:
                 xrepodata = repodatas[xID]
-                if indata.get('collapse'):
-                    m = re.search(indata.get('collapse'), xrepodata['_source']['sourceURL'])
+                if indata.get("collapse"):
+                    m = re.search(
+                        indata.get("collapse"), xrepodata["_source"]["sourceURL"]
+                    )
                     if m:
                         xID = m.group(1)
                 else:
-                    xID = re.sub(r"^.+/", "", xrepodata['_source']['sourceURL'])
+                    xID = re.sub(r"^.+/", "", xrepodata["_source"]["sourceURL"])
                 if xID != ID:
                     xlinks = []
                     for author in xrepo:
                         if author in repo:
                             xlinks.append(author)
-                    lname = "%s@%s" % (ID, xID) # Link name
-                    rname = "%s@%s" % (xID, ID) # Reverse link name
+                    lname = "%s@%s" % (ID, xID)  # Link name
+                    rname = "%s@%s" % (xID, ID)  # Reverse link name
                     if len(xlinks) >= minLinks and not rname in repo_links:
                         mylinks[xID] = len(xlinks)
-                        repo_links[lname] = repo_links.get(lname, 0) + len(xlinks) # How many contributors in common between project A and B?
+                        repo_links[lname] = repo_links.get(lname, 0) + len(
+                            xlinks
+                        )  # How many contributors in common between project A and B?
                         if repo_links[lname] > max_shared:
                             max_shared = repo_links[lname]
         if ID not in repo_notoriety:
             repo_notoriety[ID] = set()
-        repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to?
+        repo_notoriety[ID].update(
+            mylinks.keys()
+        )  # How many projects is this repo connected to?
 
         if ID not in repo_authors:
             repo_authors[ID] = set()
-        repo_authors[ID].update(repo) # How many projects is this repo connected to?
+        repo_authors[ID].update(repo)  # Which contributors work on this repo?
if ID != oID: repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID] if repo_commits[ID] > max_commits: - max_commits = repo_commits[ID] # Used for calculating max link thickness + max_commits = repo_commits[ + ID + ] # Used for calculating max link thickness if len(repo_notoriety[ID]) > max_links: max_links = len(repo_notoriety[ID]) if len(repo_authors[ID]) > max_authors: - max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts + max_authors = len( + repo_authors[ID] + ) # Used for calculating max sphere size in charts # Now, pull it all together! nodes = [] @@ -260,45 +258,44 @@ def run(API, environ, indata, session): for sourceID in repo_notoriety.keys(): lsize = 0 for k in repo_links.keys(): - fr, to = k.split('@') + fr, to = k.split("@") if fr == sourceID or to == sourceID: lsize += 1 asize = len(repo_authors[sourceID]) doc = { - 'id': sourceID, - 'name': sourceID, - 'commits': repo_commits[sourceID], - 'authors': asize, - 'links': lsize, - 'size': max(5, (1 - abs(math.log10(asize / max_authors))) * 45), - 'tooltip': "%u connections, %u contributors, %u commits" % (lsize, asize, repo_commits[sourceID]) + "id": sourceID, + "name": sourceID, + "commits": repo_commits[sourceID], + "authors": asize, + "links": lsize, + "size": max(5, (1 - abs(math.log10(asize / max_authors))) * 45), + "tooltip": "%u connections, %u contributors, %u commits" + % (lsize, asize, repo_commits[sourceID]), } nodes.append(doc) existing_repos.append(sourceID) for k, s in repo_links.items(): size = s - fr, to = k.split('@') + fr, to = k.split("@") if fr in existing_repos and to in existing_repos: doc = { - 'source': fr, - 'target': to, - 'value': max(1, (size/max_shared) * 8), - 'name': "%s ↔ %s" % (fr, to), - 'tooltip': "%u committers in common" % size + "source": fr, + "target": to, + "value": max(1, (size / max_shared) * 8), + "name": "%s ↔ %s" % (fr, to), + "tooltip": "%u committers in common" % size, } links.append(doc) JSON_OUT = { - 'maxLinks': max_links, - 'maxShared': max_shared, - 'widgetType': { - 'chartType': 'link' # Recommendation for the UI - }, - 'links': links, - 'nodes': nodes, - 'interval': interval, - 'okay': True, - 'responseTime': time.time() - now + "maxLinks": max_links, + "maxShared": max_shared, + "widgetType": {"chartType": "link"}, # Recommendation for the UI + "links": links, + "nodes": nodes, + "interval": interval, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/retention.py b/kibble/api/pages/code/retention.py index 31debdc0..0ae308a1 100644 --- a/kibble/api/pages/code/retention.py +++ b/kibble/api/pages/code/retention.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the code contributor retention factor renderer for Kibble """ @@ -74,6 +70,7 @@ import re import datetime + def run(API, environ, indata, session): # We need to be logged in for this! 
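
The sphere-size formula in relationships.py above compresses contributor counts logarithmically so that small and huge repos remain plottable on one chart, flooring at 5 and peaking at 45 when a repo reaches max_authors. A sketch of its behaviour (sample values are illustrative):

    import math

    def node_size(authors, max_authors):
        # Same expression as the endpoint: repos near max_authors approach 45,
        # repos an order of magnitude smaller (or tiny ones) floor at 5.
        return max(5, (1 - abs(math.log10(authors / max_authors))) * 45)

    for a in (1, 10, 50, 100):
        print(a, round(node_size(a, 100), 1))  # 1 -> 5, 10 -> 5, 50 -> 31.5, 100 -> 45.0
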
@@ -84,13 +81,14 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    hl = indata.get('span', 12) # By default, we define a contributor as active if having committer in the past year
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    hl = indata.get(
+        "span", 12
+    )  # By default, we define a contributor as active if they have committed within the past year
     tnow = datetime.date.today()
     nm = tnow.month - (tnow.month % 3)
     ny = tnow.year
@@ -107,7 +105,7 @@ def run(API, environ, indata, session):
 
     FoundSomething = False
     ny = 1970
-    while ny < cy or (ny == cy and (nm+3) <= tnow.month):
+    while ny < cy or (ny == cy and (nm + 3) <= tnow.month):
         d = datetime.date(ny, nm, 1)
         t = time.mktime(d.timetuple())
         nm += 3
@@ -121,76 +119,51 @@ def run(API, environ, indata, session):
 
         ####################################################################
         ####################################################################
-        dOrg = session.user['defaultOrganisation'] or "apache"
+        dOrg = session.user["defaultOrganisation"] or "apache"
         query = {
-                    'query': {
-                        'bool': {
-                            'must': [
-                                {'range':
-                                    {
-                                        'tsday': {
-                                            'from': t,
-                                            'to': tf
-                                        }
-                                    }
-                                },
-                                {
-                                    'term': {
-                                        'organisation': dOrg
-                                    }
-                                }
-                            ]
-                        }
-                    }
+            "query": {
+                "bool": {
+                    "must": [
+                        {"range": {"tsday": {"from": t, "to": tf}}},
+                        {"term": {"organisation": dOrg}},
+                    ]
                 }
+            }
+        }
 
         # Source-specific or view-specific??
-        if indata.get('source'):
-            query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+        if indata.get("source"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"sourceID": indata.get("source")}}
+            )
         elif viewList:
-            query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+            query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
         # Get an initial count of commits
         res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )
+            index=session.DB.dbname, doc_type="code_commit", body=query
+        )
 
-        globcount = res['count']
+        globcount = res["count"]
         if globcount == 0 and not FoundSomething:
             continue
         FoundSomething = True
 
        # Get top 1000 committers this period
-        query['aggs'] = {
-            'by_committer': {
-                'terms': {
-                    'field': 'committer_email',
-                    'size': 25000
-                }
-            },
-            'by_author': {
-                'terms': {
-                    'field': 'author_email',
-                    'size': 25000
-                }
-            }
-        }
+        query["aggs"] = {
+            "by_committer": {"terms": {"field": "committer_email", "size": 25000}},
+            "by_author": {"terms": {"field": "author_email", "size": 25000}},
+        }
 
         res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
-
+            index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+        )
         retained = 0
         added = 0
         lost = 0
 
         thisPeriod = []
-        for bucket in res['aggregations']['by_author']['buckets']:
-            who = bucket['key']
+        for bucket in res["aggregations"]["by_author"]["buckets"]:
+            who = bucket["key"]
             thisPeriod.append(who)
             if who not in peopleSeen:
                 peopleSeen[who] = tf
@@ -201,7 +174,7 @@ def run(API, environ, indata, session):
 
         prune = []
         for k, v in activePeople.items():
-            if v < (t - (hl*30.45*86400)):
+            if v < (t - (hl * 30.45 * 86400)):
                 prune.append(k)
                 lost += 1
 
@@ -210,45 +183,48 @@ def 
run(API, environ, indata, session): del peopleSeen[who] retained = len(activePeople) - added - ts.append({ - 'date': tf, - 'People who (re)joined': added, - 'People who quit': lost, - 'People retained': retained, - 'Active people': added + retained - }) + ts.append( + { + "date": tf, + "People who (re)joined": added, + "People who quit": lost, + "People retained": retained, + "Active people": added + retained, + } + ) groups = [ - ['More than 5 years', (5*365*86400)+1], - ['2 - 5 years', (2*365*86400)+1], - ['1 - 2 years', (365*86400)], - ['Less than a year', 1] + ["More than 5 years", (5 * 365 * 86400) + 1], + ["2 - 5 years", (2 * 365 * 86400) + 1], + ["1 - 2 years", (365 * 86400)], + ["Less than a year", 1], ] counts = {} totExp = 0 for person, age in activePeople.items(): totExp += time.time() - allPeople[person] - for el in sorted(groups, key = lambda x: x[1], reverse = True): + for el in sorted(groups, key=lambda x: x[1], reverse=True): if allPeople[person] <= time.time() - el[1]: counts[el[0]] = counts.get(el[0], 0) + 1 break - avgyr = (totExp / (86400*365)) / max(len(activePeople),1) + avgyr = (totExp / (86400 * 365)) / max(len(activePeople), 1) - ts = sorted(ts, key = lambda x: x['date']) + ts = sorted(ts, key=lambda x: x["date"]) avgm = "" yr = int(avgyr) - ym = round((avgyr-yr)*12) + ym = round((avgyr - yr) * 12) if yr >= 1: avgm += "%u year%s" % (yr, "s" if yr != 1 else "") if ym > 0: avgm += "%s%u month%s" % (", " if yr > 0 else "", ym, "s" if ym != 1 else "") JSON_OUT = { - 'text': "This shows Contributor retention as calculated over a %u month timespan. The average experience of currently active people is %s." % (hl, avgm), - 'timeseries': ts, - 'counts': counts, - 'averageYears': avgyr, - 'okay': True, - 'responseTime': time.time() - now, + "text": "This shows Contributor retention as calculated over a %u month timespan. The average experience of currently active people is %s." + % (hl, avgm), + "timeseries": ts, + "counts": counts, + "averageYears": avgyr, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/sloc.py b/kibble/api/pages/code/sloc.py index 9709fc3e..b221f953 100644 --- a/kibble/api/pages/code/sloc.py +++ b/kibble/api/pages/code/sloc.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,80 +61,62 @@ ######################################################################## - - - """ This is the SLoC renderer for Kibble """ import json + def run(API, environ, indata, session): # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) # Fetch all sources for default org - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - { - 'terms': { - 'type': ['git', 'svn', 'github'] - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"terms": {"type": ["git", "svn", "github"]}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="source", - size = 5000, - body = query - ) + index=session.DB.dbname, doc_type="source", size=5000, body=query + ) languages = {} years = 0 - for hit in res['hits']['hits']: - doc = hit['_source'] - if 'sloc' in doc: - sloc = doc['sloc'] - years += sloc['years'] - for k, v in sloc['languages'].items(): - languages[k] = languages.get(k, {'code': 0, 'comment': 0, 'blank': 0}) - languages[k]['code'] += v.get('code', 0) - languages[k]['comment'] += v.get('comment', 0) - languages[k]['blank'] += v.get('blank', 0) - - - JSON_OUT = { - 'languages': languages, - 'okay': True, - 'years': years - } + for hit in res["hits"]["hits"]: + doc = hit["_source"] + if "sloc" in doc: + sloc = doc["sloc"] + years += sloc["years"] + for k, v in sloc["languages"].items(): + languages[k] = languages.get(k, {"code": 0, "comment": 0, "blank": 0}) + languages[k]["code"] += v.get("code", 0) + languages[k]["comment"] += v.get("comment", 0) + languages[k]["blank"] += v.get("blank", 0) + + JSON_OUT = {"languages": languages, "okay": True, "years": years} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/top-commits.py b/kibble/api/pages/code/top-commits.py index 3c782e97..9cda87af 100644 --- a/kibble/api/pages/code/top-commits.py +++ b/kibble/api/pages/code/top-commits.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN repos by commits list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import re + def run(API, environ, indata, session): # We need to be logged in for this! 
@@ -83,83 +80,69 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"committer_email": indata.get("email")}}, + {"term": {"author_email": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Path filter? - if indata.get('pathfilter'): - pf = indata.get('pathfilter') - if '!' in pf: - pf = pf.replace('!', '') - query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', []) - query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) + if indata.get("pathfilter"): + pf = indata.get("pathfilter") + if "!" 
in pf: + pf = pf.replace("!", "") + query["query"]["bool"]["must_not"] = query["query"]["bool"].get( + "must_not", [] + ) + query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}}) else: - query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) - + query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}}) # Get top 25 committers this period - query['aggs'] = { - 'by_repo': { - 'terms': { - 'field': 'sourceURL', - 'size': 5000 - } - } - } + query["aggs"] = {"by_repo": {"terms": {"field": "sourceURL", "size": 5000}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) toprepos = [] - for bucket in res['aggregations']['by_repo']['buckets']: - repo = re.sub(r".+/([^/]+?)(?:\.git)?$", r"\1", bucket['key']) - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_repo"]["buckets"]: + repo = re.sub(r".+/([^/]+?)(?:\.git)?$", r"\1", bucket["key"]) + count = bucket["doc_count"] toprepos.append([repo, count]) - toprepos = sorted(toprepos, key = lambda x: x[1], reverse = True) + toprepos = sorted(toprepos, key=lambda x: x[1], reverse=True) top = toprepos[0:24] if len(toprepos) > 25: count = 0 @@ -171,9 +154,5 @@ def run(API, environ, indata, session): for v in top: tophash[v[0]] = v[1] - JSON_OUT = { - 'counts': tophash, - 'okay': True, - 'responseTime': time.time() - now, - } + JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/top-sloc.py b/kibble/api/pages/code/top-sloc.py index 6beafaac..6950ca0f 100644 --- a/kibble/api/pages/code/top-sloc.py +++ b/kibble/api/pages/code/top-sloc.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN repos by SLoC list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import re + def run(API, environ, indata, session): # We need to be logged in for this! @@ -83,57 +80,47 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'terms': - { - 'type': ['git', 'svn', 'github'] - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"terms": {"type": ["git", "svn", "github"]}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="source", - size = 5000, - body = query - ) + index=session.DB.dbname, doc_type="source", size=5000, body=query + ) toprepos = [] - for doc in res['hits']['hits']: - repo = doc['_source'] - url = re.sub(r".+/([^/]+?)(?:\.git)?$", r"\1", repo['sourceURL']) - if 'sloc' in repo: - count = repo['sloc'].get('loc', 0) + for doc in res["hits"]["hits"]: + repo = doc["_source"] + url = re.sub(r".+/([^/]+?)(?:\.git)?$", r"\1", repo["sourceURL"]) + if "sloc" in repo: + count = repo["sloc"].get("loc", 0) if not count: count = 0 toprepos.append([url, count]) - toprepos = sorted(toprepos, key = lambda x: int(x[1]), reverse = True) + toprepos = sorted(toprepos, key=lambda x: int(x[1]), reverse=True) top = toprepos[0:24] if len(toprepos) > 25: count = 0 @@ -145,9 +132,5 @@ def run(API, environ, indata, session): for v in top: tophash[v[0]] = v[1] - JSON_OUT = { - 'counts': tophash, - 'okay': True, - 'responseTime': time.time() - now, - } + JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/code/trends.py b/kibble/api/pages/code/trends.py index da1803ca..adb7f1e0 100644 --- a/kibble/api/pages/code/trends.py +++ b/kibble/api/pages/code/trends.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the SLoC renderer for Kibble """ @@ -72,6 +68,7 @@ import json import time + def run(API, environ, indata, session): # We need to be logged in for this! @@ -82,268 +79,184 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - #################################################################### # We start by doing all the queries for THIS period. # # Then we reset the query, and change date to yonder-->from # # and rerun the same queries. 
# #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"committer_email": indata.get("email")}}, + {"term": {"author_email": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Path filter? - if indata.get('pathfilter'): - pf = indata.get('pathfilter') - if '!' in pf: - pf = pf.replace('!', '') - query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', []) - query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) + if indata.get("pathfilter"): + pf = indata.get("pathfilter") + if "!" in pf: + pf = pf.replace("!", "") + query["query"]["bool"]["must_not"] = query["query"]["bool"].get( + "must_not", [] + ) + query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}}) else: - query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) + query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}}) # Get number of commits, this period res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) - no_commits = res['count'] - + index=session.DB.dbname, doc_type="code_commit", body=query + ) + no_commits = res["count"] # Get number of committers, this period - query['aggs'] = { - 'commits': { - 'cardinality': { - 'field': 'committer_email' - } - }, - 'authors': { - 'cardinality': { - 'field': 'author_email' - } - } - - } + query["aggs"] = { + "commits": {"cardinality": {"field": "committer_email"}}, + "authors": {"cardinality": {"field": "author_email"}}, + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - no_committers = res['aggregations']['commits']['value'] - no_authors = res['aggregations']['authors']['value'] - + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) + no_committers = res["aggregations"]["commits"]["value"] + no_authors = res["aggregations"]["authors"]["value"] # Get number of insertions, this period - query['aggs'] = { - 'changes': { - 'sum': { - 'field': 'insertions' - } - } - } + query["aggs"] = {"changes": {"sum": {"field": "insertions"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - insertions = res['aggregations']['changes']['value'] + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) + 
insertions = res["aggregations"]["changes"]["value"] # Get number of deletions, this period - query['aggs'] = { - 'changes': { - 'sum': { - 'field': 'deletions' - } - } - } + query["aggs"] = {"changes": {"sum": {"field": "deletions"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - deletions = res['aggregations']['changes']['value'] - + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) + deletions = res["aggregations"]["changes"]["value"] #################################################################### # Change to PRIOR SPAN # #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateYonder, "to": dateFrom - 1}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Path filter? - if indata.get('pathfilter'): - pf = indata.get('pathfilter') - if '!' in pf: - pf = pf.replace('!', '') - query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', []) - query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}}) + if indata.get("pathfilter"): + pf = indata.get("pathfilter") + if "!" 
in pf: + pf = pf.replace("!", "") + query["query"]["bool"]["must_not"] = query["query"]["bool"].get( + "must_not", [] + ) + query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}}) else: - query['query']['bool']['must'].append({'regexp': {'files_changed': pf}}) - + query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}}) # Get number of commits, this period res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) - no_commits_before = res['count'] + index=session.DB.dbname, doc_type="code_commit", body=query + ) + no_commits_before = res["count"] # Get number of committers, this period - query['aggs'] = { - 'commits': { - 'cardinality': { - 'field': 'committer_email' - } - }, - 'authors': { - 'cardinality': { - 'field': 'author_email' - } - } - } + query["aggs"] = { + "commits": {"cardinality": {"field": "committer_email"}}, + "authors": {"cardinality": {"field": "author_email"}}, + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - no_committers_before = res['aggregations']['commits']['value'] - no_authors_before = res['aggregations']['authors']['value'] + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) + no_committers_before = res["aggregations"]["commits"]["value"] + no_authors_before = res["aggregations"]["authors"]["value"] # Get number of insertions, this period - query['aggs'] = { - 'changes': { - 'sum': { - 'field': 'insertions' - } - } - } + query["aggs"] = {"changes": {"sum": {"field": "insertions"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - insertions_before = res['aggregations']['changes']['value'] + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) + insertions_before = res["aggregations"]["changes"]["value"] - # Get number of deletions, this period - query['aggs'] = { - 'changes': { - 'sum': { - 'field': 'deletions' - } - } - } + # Get number of deletions, this period + query["aggs"] = {"changes": {"sum": {"field": "deletions"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - deletions_before = res['aggregations']['changes']['value'] - - + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) + deletions_before = res["aggregations"]["changes"]["value"] trends = { "committers": { - 'before': no_committers_before, - 'after': no_committers, - 'title': "Committers this period" + "before": no_committers_before, + "after": no_committers, + "title": "Committers this period", }, "authors": { - 'before': no_authors_before, - 'after': no_authors, - 'title': "Authors this period" + "before": no_authors_before, + "after": no_authors, + "title": "Authors this period", }, - 'commits': { - 'before': no_commits_before, - 'after': no_commits, - 'title': "Commits this period" + "commits": { + "before": no_commits_before, + "after": no_commits, + "title": "Commits this period", + }, + "changes": { + "before": insertions_before + deletions_before, + "after": insertions + deletions, + "title": "Lines changed this period", }, - 'changes': { - 'before': insertions_before + deletions_before, - 'after': insertions + deletions, - 'title': "Lines changed this period" - } } - JSON_OUT = { - 'trends': trends, - 'okay': True, - 'responseTime': time.time() - now - } + JSON_OUT = {"trends": trends, "okay": True, "responseTime": time.time() - now} yield 
json.dumps(JSON_OUT) + """ commits = { before = pcommits, diff --git a/kibble/api/pages/filters.py b/kibble/api/pages/filters.py index cedf96e6..32b1360c 100644 --- a/kibble/api/pages/filters.py +++ b/kibble/api/pages/filters.py @@ -23,6 +23,7 @@ import re import time + def run(API, environ, indata, session): # We need to be logged in for this! @@ -30,36 +31,26 @@ def run(API, environ, indata, session): raise API.exception(403, "You must be logged in to use this API endpoint! %s") # Fetch all sources for default org - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="view", - size = 5000, - body = { - 'query': { - 'term': { - 'owner': session.user['email'] - } - } - } - ) + index=session.DB.dbname, + doc_type="view", + size=5000, + body={"query": {"term": {"owner": session.user["email"]}}}, + ) sources = [] - for hit in res['hits']['hits']: - doc = hit['_source'] - if indata.get('quick'): + for hit in res["hits"]["hits"]: + doc = hit["_source"] + if indata.get("quick"): xdoc = { - 'sourceID': doc['sourceID'], - 'type': doc['type'], - 'sourceURL': doc['sourceURL'] - } + "sourceID": doc["sourceID"], + "type": doc["type"], + "sourceURL": doc["sourceURL"], + } sources.append(xdoc) else: sources.append(doc) - JSON_OUT = { - 'views': sources, - 'okay': True, - 'organisation': dOrg - } + JSON_OUT = {"views": sources, "okay": True, "organisation": dOrg} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/forum/actors.py b/kibble/api/pages/forum/actors.py index ede9f6a4..a9b1d18f 100644 --- a/kibble/api/pages/forum/actors.py +++ b/kibble/api/pages/forum/actors.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the forum actors stats page for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! 
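# A minimal, runnable sketch of the "pathfilter" convention used by the code
# handlers above (illustration only, not part of this commit): the value is
# applied as a regexp against files_changed, and a "!" in the value inverts
# the filter into must_not. `apply_path_filter` is a hypothetical helper
# name; the handlers inline this logic.
def apply_path_filter(query, pf):
    bool_q = query["query"]["bool"]
    if "!" in pf:
        pf = pf.replace("!", "")
        # setdefault is equivalent to the .get(..., []) dance in the handlers
        bool_q.setdefault("must_not", []).append({"regexp": {"files_changed": pf}})
    else:
        bool_q.setdefault("must", []).append({"regexp": {"files_changed": pf}})
    return query


q = {"query": {"bool": {"must": []}}}
print(apply_path_filter(q, "!docs/.*"))
# -> must_not now carries {'regexp': {'files_changed': 'docs/.*'}}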
@@ -83,152 +80,107 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - - interval = indata.get('interval', 'month') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}] + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}} + ] # Get timeseries for this period - query['aggs'] = { - 'per_interval': { - 'date_histogram': { - 'field': 'createdDate', - 'interval': interval - }, - 'aggs': { - 'by_user': { - 'cardinality': { - 'field': 'creator' - } - } - } - } + query["aggs"] = { + "per_interval": { + "date_histogram": {"field": "createdDate", "interval": interval}, + "aggs": {"by_user": {"cardinality": {"field": "creator"}}}, } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_post", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="forum_post", size=0, body=query + ) timeseries = {} - for bucket in res['aggregations']['per_interval']['buckets']: - ts = int(bucket['key'] / 1000) - ccount = bucket['by_user']['value'] - timeseries[ts] = { - 'date': ts, - 'topic responders': ccount, - 'topic creators': 0 - } - + for bucket in res["aggregations"]["per_interval"]["buckets"]: + ts = int(bucket["key"] / 1000) + ccount = bucket["by_user"]["value"] + timeseries[ts] = {"date": ts, "topic responders": ccount, "topic creators": 0} #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 
'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'creator': indata.get('email')}}] + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [{"term": {"creator": indata.get("email")}}] # Get timeseries for this period - query['aggs'] = { - 'per_interval': { - 'date_histogram': { - 'field': 'createdDate', - 'interval': interval - }, - 'aggs': { - 'by_user': { - 'cardinality': { - 'field': 'creator' - } - } - } - } + query["aggs"] = { + "per_interval": { + "date_histogram": {"field": "createdDate", "interval": interval}, + "aggs": {"by_user": {"cardinality": {"field": "creator"}}}, } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_topic", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="forum_topic", size=0, body=query + ) - for bucket in res['aggregations']['per_interval']['buckets']: - ts = int(bucket['key'] / 1000) - ccount = bucket['by_user']['value'] + for bucket in res["aggregations"]["per_interval"]["buckets"]: + ts = int(bucket["key"] / 1000) + ccount = bucket["by_user"]["value"] if ts in timeseries: - timeseries[ts]['topic creators'] = ccount + timeseries[ts]["topic creators"] = ccount else: timeseries[ts] = { - 'date': ts, - 'topic creators': 0, - 'topic responders': ccount + "date": ts, + "topic creators": 0, + "topic responders": ccount, } ts = [] @@ -236,11 +188,9 @@ def run(API, environ, indata, session): ts.append(el) JSON_OUT = { - 'timeseries': ts, - 'okay': True, - 'responseTime': time.time() - now, - 'widgetType': { - 'chartType': 'bar' - } + "timeseries": ts, + "okay": True, + "responseTime": time.time() - now, + "widgetType": {"chartType": "bar"}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/forum/creators.py b/kibble/api/pages/forum/creators.py index f5f92703..1a9e8b6d 100644 --- a/kibble/api/pages/forum/creators.py +++ b/kibble/api/pages/forum/creators.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN issue openers list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! 
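# How the per-interval actor counts above are requested and read back, as a
# runnable sketch (the response dict is fabricated): a date_histogram
# bucketed by the requested interval, with a cardinality sub-aggregation
# counting distinct creators per bucket. Bucket keys come back in epoch
# milliseconds, hence the division by 1000.
interval = "month"
aggs = {
    "per_interval": {
        "date_histogram": {"field": "createdDate", "interval": interval},
        "aggs": {"by_user": {"cardinality": {"field": "creator"}}},
    }
}

res = {
    "aggregations": {
        "per_interval": {
            "buckets": [
                {"key": 1577836800000, "doc_count": 40, "by_user": {"value": 7}},
                {"key": 1580515200000, "doc_count": 55, "by_user": {"value": 9}},
            ]
        }
    }
}

timeseries = {}
for bucket in res["aggregations"]["per_interval"]["buckets"]:
    ts = int(bucket["key"] / 1000)  # ms -> seconds
    timeseries[ts] = {
        "date": ts,
        "topic responders": bucket["by_user"]["value"],
        "topic creators": 0,  # filled in by the second query in the handler
    }
print(timeseries)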
@@ -83,101 +80,78 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - interval = indata.get('interval', 'month') + interval = indata.get("interval", "month") xtitle = None #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}}) - xtitle = "People opening issues solved by %s" % indata.get('email') + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append( + {"term": {"creator": indata.get("email")}} + ) + xtitle = "People opening issues solved by %s" % indata.get("email") # Get top 25 committers this period - query['aggs'] = { - 'committers': { - 'terms': { - 'field': 'creator', - 'size': 25 - }, - 'aggs': { - - } - } + query["aggs"] = { + "committers": {"terms": {"field": "creator", "size": 25}, "aggs": {}} } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_topic", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="forum_topic", size=0, body=query + ) people = {} - for bucket in res['aggregations']['committers']['buckets']: - email = bucket['key'] - count = bucket['doc_count'] + for bucket in res["aggregations"]["committers"]["buckets"]: + email = bucket["key"] + count = bucket["doc_count"] sha = email - if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha): + if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha): pres = session.DB.ES.get( - index=session.DB.dbname, - doc_type="person", - id = email - ) - person = pres['_source'] - person['name'] = person.get('name', 'unknown') + index=session.DB.dbname, doc_type="person", id=email + ) + person = pres["_source"] + person["name"] = person.get("name", "unknown") people[email] = person - people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest() - people[email]['count'] = count + 
people[email]["gravatar"] = hashlib.md5( + person.get("email", "unknown").encode("utf-8") + ).hexdigest() + people[email]["count"] = count topN = [] for email, person in people.items(): topN.append(person) - topN = sorted(topN, key = lambda x: x['count'], reverse = True) + topN = sorted(topN, key=lambda x: x["count"], reverse=True) JSON_OUT = { - 'topN': { - 'denoter': 'topics created', - 'items': topN, - }, - 'okay': True, - 'responseTime': time.time() - now, - 'widgetType': { - 'chartType': 'bar', - 'title': xtitle - } + "topN": {"denoter": "topics created", "items": topN}, + "okay": True, + "responseTime": time.time() - now, + "widgetType": {"chartType": "bar", "title": xtitle}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/forum/issues.py b/kibble/api/pages/forum/issues.py index b4f7fc0d..8c4bbe8f 100644 --- a/kibble/api/pages/forum/issues.py +++ b/kibble/api/pages/forum/issues.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the forum timeseries renderer for Kibble """ @@ -78,10 +74,11 @@ def makeTS(dist): ts = {} for k in dist: - ts[k + ' topics'] = 0 - ts[k + ' replies'] = 0 + ts[k + " topics"] = 0 + ts[k + " replies"] = 0 return ts + def run(API, environ, indata, session): # We need to be logged in for this! @@ -92,27 +89,26 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - interval = indata.get('interval', 'month') + interval = indata.get("interval", "month") # By default, we lump generic forums and question/answer (like SO, askbot) together as one - distinct = { - 'forum': ['discourse', 'stackoverflow', 'askbot'] - } + distinct = {"forum": ["discourse", "stackoverflow", "askbot"]} # If requested, we split them into two - if indata.get('distinguish', False): + if indata.get("distinguish", False): distinct = { - 'forum': ['discourse'], - 'question bank': ['stackoverflow', 'askbot'] + "forum": ["discourse"], + "question bank": ["stackoverflow", "askbot"], } timeseries = {} @@ -123,138 +119,106 @@ def run(API, environ, indata, session): #################################################################### # ISSUES OPENED # #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - }, - { - 'terms': { - 'type': iValues - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": 
{"organisation": dOrg}}, + {"terms": {"type": iValues}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append( + {"term": {"creator": indata.get("email")}} + ) # Get number of opened ones, this period - query['aggs'] = { - 'commits': { - 'date_histogram': { - 'field': 'createdDate', - 'interval': interval - } - } + query["aggs"] = { + "commits": { + "date_histogram": {"field": "createdDate", "interval": interval} } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_topic", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="forum_topic", size=0, body=query + ) - for bucket in res['aggregations']['commits']['buckets']: - ts = int(bucket['key'] / 1000) - count = bucket['doc_count'] + for bucket in res["aggregations"]["commits"]["buckets"]: + ts = int(bucket["key"] / 1000) + count = bucket["doc_count"] timeseries[ts] = timeseries.get(ts, makeTS(distinct)) - timeseries[ts][iType + ' topics'] = timeseries[ts].get(iType + ' topics', 0) + count - + timeseries[ts][iType + " topics"] = ( + timeseries[ts].get(iType + " topics", 0) + count + ) #################################################################### # ISSUES CLOSED # #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - }, - { - 'terms': { - 'type': iValues - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + {"terms": {"type": iValues}}, + ] } + } + } if viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) + if indata.get("email"): + query["query"]["bool"]["must"].append( + {"term": {"creator": indata.get("email")}} + ) # Get number of closed ones, this period - query['aggs'] = { - 'commits': { - 'date_histogram': { - 'field': 'createdDate', - 'interval': interval - } - } + query["aggs"] = { + "commits": { + "date_histogram": {"field": "createdDate", "interval": interval} } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_post", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="forum_post", size=0, body=query + ) - for bucket in res['aggregations']['commits']['buckets']: - ts = int(bucket['key'] / 1000) - count = bucket['doc_count'] + for bucket in 
res["aggregations"]["commits"]["buckets"]: + ts = int(bucket["key"] / 1000) + count = bucket["doc_count"] timeseries[ts] = timeseries.get(ts, makeTS(distinct)) - timeseries[ts][iType + ' replies'] = timeseries[ts].get(iType + ' replies', 0) + count + timeseries[ts][iType + " replies"] = ( + timeseries[ts].get(iType + " replies", 0) + count + ) ts = [] for k, v in timeseries.items(): - v['date'] = k + v["date"] = k ts.append(v) - JSON_OUT = { - 'widgetType': { - 'chartType': 'line', # Recommendation for the UI - 'nofill': True + "widgetType": { + "chartType": "line", # Recommendation for the UI + "nofill": True, }, - 'timeseries': ts, - 'interval': interval, - 'okay': True, - 'distinguishable': True, - 'responseTime': time.time() - now + "timeseries": ts, + "interval": interval, + "okay": True, + "distinguishable": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/forum/responders.py b/kibble/api/pages/forum/responders.py index 0ffa5a3b..a25481db 100644 --- a/kibble/api/pages/forum/responders.py +++ b/kibble/api/pages/forum/responders.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN forum posters list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! @@ -83,102 +80,78 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - interval = indata.get('interval', 'month') + interval = indata.get("interval", "month") xtitle = None - #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}}) - xTitle = "People closing %s's issues" % indata.get('email') + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append( + {"term": {"creator": indata.get("email")}} + ) + xTitle = "People closing %s's issues" % indata.get("email") # Get top 25 committers this period - query['aggs'] = { - 'committers': { - 'terms': { - 'field': 'creator', - 'size': 25 - }, - 'aggs': { - - } - } + query["aggs"] = { + "committers": {"terms": {"field": "creator", "size": 25}, "aggs": {}} } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_post", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="forum_post", size=0, body=query + ) people = {} - for bucket in res['aggregations']['committers']['buckets']: - email = bucket['key'] - count = bucket['doc_count'] + for bucket in res["aggregations"]["committers"]["buckets"]: + email = bucket["key"] + count = bucket["doc_count"] sha = email - if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha): + if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha): pres = session.DB.ES.get( - index=session.DB.dbname, - doc_type="person", - id = email - ) - person = pres['_source'] - person['name'] = person.get('name', 'unknown') + index=session.DB.dbname, doc_type="person", id=email + ) + person = pres["_source"] + person["name"] = person.get("name", "unknown") people[email] = person - people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest() - people[email]['count'] = count + people[email]["gravatar"] = hashlib.md5( + person.get("email", "unknown").encode("utf-8") + ).hexdigest() + people[email]["count"] = count topN = [] for email, person in people.items(): topN.append(person) - topN = sorted(topN, key = lambda x: x['count'], reverse = True) + topN = sorted(topN, key=lambda x: x["count"], reverse=True) JSON_OUT = { - 'topN': { - 'denoter': 'replies posted', - 'items': topN, - }, - 'okay': True, - 'responseTime': time.time() - now, - 'widgetType': { - 'chartType': 'bar', - 'title': xtitle - } + "topN": {"denoter": "replies posted", "items": topN}, + "okay": True, + "responseTime": time.time() - now, + "widgetType": {"chartType": "bar", "title": xtitle}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/forum/top-count.py b/kibble/api/pages/forum/top-count.py index 545567ac..35947b08 100644 --- a/kibble/api/pages/forum/top-count.py +++ b/kibble/api/pages/forum/top-count.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN repos by commits list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import re + def run(API, environ, indata, session): # We need to be logged in for this! 
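# A note on responders.py above: the handler initialises `xtitle = None` and
# returns {"title": xtitle}, but the email branch assigns to `xTitle`
# (capital T), so the custom chart title is silently dropped. The likely
# intended assignment, sketched here with a stand-in `indata` dict:
indata = {"email": "dev@example.org"}
xtitle = None
if indata.get("email"):
    xtitle = "People closing %s's issues" % indata.get("email")  # lowercase xtitle
print(xtitle)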
@@ -83,76 +80,57 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [ - {'term': {'creator': indata.get('email')}} - ] - + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [{"term": {"creator": indata.get("email")}}] # Get top 25 committers this period - query['aggs'] = { - 'by_repo': { - 'terms': { - 'field': 'sourceID', - 'size': 5000 - } - } - } + query["aggs"] = {"by_repo": {"terms": {"field": "sourceID", "size": 5000}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_post", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="forum_post", size=0, body=query + ) toprepos = [] - for bucket in res['aggregations']['by_repo']['buckets']: - ID = bucket['key'] - if session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID): - it = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID)['_source'] - repo = re.sub(r".+/([^/]+)$", r"\1", it['sourceURL']) - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_repo"]["buckets"]: + ID = bucket["key"] + if session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id=ID): + it = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id=ID)[ + "_source" + ] + repo = re.sub(r".+/([^/]+)$", r"\1", it["sourceURL"]) + count = bucket["doc_count"] toprepos.append([repo, count]) - toprepos = sorted(toprepos, key = lambda x: x[1], reverse = True) + toprepos = sorted(toprepos, key=lambda x: x[1], reverse=True) top = toprepos[0:24] if len(toprepos) > 25: count = 0 @@ -164,9 +142,5 @@ def run(API, environ, indata, session): for v in top: tophash[v[0]] = v[1] - JSON_OUT = { - 'counts': tophash, - 'okay': True, - 'responseTime': time.time() - now, - } + JSON_OUT = {"counts": tophash, "okay": True, "responseTime": 
time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/forum/top.py b/kibble/api/pages/forum/top.py index 483aef17..7775a17a 100644 --- a/kibble/api/pages/forum/top.py +++ b/kibble/api/pages/forum/top.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the issue actors stats page for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! @@ -83,79 +80,58 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - interval = indata.get('interval', 'month') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - }, - 'sort': { - 'posts': 'desc' - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + }, + "sort": {"posts": "desc"}, + } # Source-specific or view-specific?? 
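# A sketch of the sourceID resolution in top-count.py above (illustration
# only): each aggregation bucket key is a sourceID that is looked up in the
# "source" index to recover its sourceURL before the regexp shortens it. A
# plain dict stands in for the per-ID exists/get round-trips; a single
# multi-get (mget) could avoid the N+1 lookups, at the cost of a bulkier
# request.
import re

sources = {  # stand-in for the "source" doc type
    "abc123": {"sourceURL": "https://lists.example.org/kibble-dev"},
    "def456": {"sourceURL": "https://forum.example.org/general"},
}
buckets = [{"key": "abc123", "doc_count": 31}, {"key": "def456", "doc_count": 12}]

toprepos = []
for bucket in buckets:
    it = sources.get(bucket["key"])  # handler: ES.exists(...) then ES.get(...)
    if it:
        repo = re.sub(r".+/([^/]+)$", r"\1", it["sourceURL"])
        toprepos.append([repo, bucket["doc_count"]])
print(toprepos)  # [['kibble-dev', 31], ['general', 12]]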
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'creator': indata.get('email')}}] + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [{"term": {"creator": indata.get("email")}}] res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_topic", - size = 25, - body = query - ) + index=session.DB.dbname, doc_type="forum_topic", size=25, body=query + ) top = [] - for bucket in res['hits']['hits']: - doc = bucket['_source'] - doc['source'] = doc.get('url', '#') - doc['name'] = doc.get('type', 'unknown') - doc['subject'] = doc.get('title') - doc['count'] = doc.get('posts', 0) + for bucket in res["hits"]["hits"]: + doc = bucket["_source"] + doc["source"] = doc.get("url", "#") + doc["name"] = doc.get("type", "unknown") + doc["subject"] = doc.get("title") + doc["count"] = doc.get("posts", 0) top.append(doc) - JSON_OUT = { - 'topN': { - 'denoter': 'interactions', - 'icon': 'comment', - 'items': top - }, - 'okay': True, - 'responseTime': time.time() - now, - 'widgetType': { - 'chartType': 'line' - } + "topN": {"denoter": "interactions", "icon": "comment", "items": top}, + "okay": True, + "responseTime": time.time() - now, + "widgetType": {"chartType": "line"}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/forum/trends.py b/kibble/api/pages/forum/trends.py index 27efa264..f6ec6104 100644 --- a/kibble/api/pages/forum/trends.py +++ b/kibble/api/pages/forum/trends.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the forum trends renderer for Kibble """ @@ -72,6 +68,7 @@ import json import time + def run(API, environ, indata, session): # We need to be logged in for this! @@ -82,20 +79,20 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" #################################################################### # We start by doing all the queries for THIS period. # @@ -103,250 +100,157 @@ def run(API, environ, indata, session): # and rerun the same queries. 
# #################################################################### query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get number of issues created, this period res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="forum_topic", - body = query - ) - no_issues_created = res['count'] - + index=session.DB.dbname, doc_type="forum_topic", body=query + ) + no_issues_created = res["count"] # Get number of open/close, this period - query['aggs'] = { - 'opener': { - 'cardinality': { - 'field': 'creator' - } - } - } + query["aggs"] = {"opener": {"cardinality": {"field": "creator"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_topic", - size = 0, - body = query - ) - no_creators = res['aggregations']['opener']['value'] - + index=session.DB.dbname, doc_type="forum_topic", size=0, body=query + ) + no_creators = res["aggregations"]["opener"]["value"] # REPLIERS query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
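# Each trend number above is taken as a pair of measurements over the same
# bool query: a plain document count for volume, and a size=0 search with a
# cardinality aggregation for distinct actors. Cardinality is an approximate
# distinct count in Elasticsearch, which is adequate for trend arrows.
# Runnable sketch of the request bodies (timestamps are made up):
query = {
    "query": {
        "bool": {
            "must": [
                {"range": {"created": {"from": 1590000000, "to": 1600000000}}},
                {"term": {"organisation": "apache"}},
            ]
        }
    }
}
# Volume:  ES.count(index=dbname, doc_type="forum_topic", body=query)["count"]
# Actors:  add the agg, search with size=0, then read
#          res["aggregations"]["opener"]["value"]
query["aggs"] = {"opener": {"cardinality": {"field": "creator"}}}
print(sorted(query))  # ['aggs', 'query']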
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get number of issues created, this period res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="forum_post", - body = query - ) - no_issues_closed = res['count'] - + index=session.DB.dbname, doc_type="forum_post", body=query + ) + no_issues_closed = res["count"] # Get number of open/close, this period - query['aggs'] = { - 'closer': { - 'cardinality': { - 'field': 'creator' - } - } - } + query["aggs"] = {"closer": {"cardinality": {"field": "creator"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_post", - size = 0, - body = query - ) - no_closers = res['aggregations']['closer']['value'] - + index=session.DB.dbname, doc_type="forum_post", size=0, body=query + ) + no_closers = res["aggregations"]["closer"]["value"] #################################################################### # Change to PRIOR SPAN # #################################################################### query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateYonder, "to": dateFrom - 1}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get number of issues, this period res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="forum_topic", - body = query - ) - no_issues_created_before = res['count'] + index=session.DB.dbname, doc_type="forum_topic", body=query + ) + no_issues_created_before = res["count"] # Get number of committers, this period - query['aggs'] = { - 'opener': { - 'cardinality': { - 'field': 'creator' - } - } - } + query["aggs"] = {"opener": {"cardinality": {"field": "creator"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_topic", - size = 0, - body = query - ) - no_creators_before = res['aggregations']['opener']['value'] - - + index=session.DB.dbname, doc_type="forum_topic", size=0, body=query + ) + no_creators_before = res["aggregations"]["opener"]["value"] # REPLIERS query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateYonder, "to": dateFrom - 1}}}, + {"term": {"organisation": dOrg}}, + ] } - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + } + } + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + 
query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get number of issues created, this period res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="forum_post", - body = query - ) - no_issues_closed_before = res['count'] - + index=session.DB.dbname, doc_type="forum_post", body=query + ) + no_issues_closed_before = res["count"] # Get number of open/close, this period - query['aggs'] = { - 'closer': { - 'cardinality': { - 'field': "creator" - } - } - } + query["aggs"] = {"closer": {"cardinality": {"field": "creator"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="forum_post", - size = 0, - body = query - ) - no_closers_before = res['aggregations']['closer']['value'] + index=session.DB.dbname, doc_type="forum_post", size=0, body=query + ) + no_closers_before = res["aggregations"]["closer"]["value"] trends = { "created": { - 'before': no_issues_created_before, - 'after': no_issues_created, - 'title': "Topics started this period" + "before": no_issues_created_before, + "after": no_issues_created, + "title": "Topics started this period", }, "authors": { - 'before': no_creators_before, - 'after': no_creators, - 'title': "People starting topics this period" + "before": no_creators_before, + "after": no_creators, + "title": "People starting topics this period", }, "closed": { - 'before': no_issues_closed_before, - 'after': no_issues_closed, - 'title': "Replies this period" + "before": no_issues_closed_before, + "after": no_issues_closed, + "title": "Replies this period", }, "closers": { - 'before': no_closers_before, - 'after': no_closers, - 'title': "People replying this period" - } + "before": no_closers_before, + "after": no_closers, + "title": "People replying this period", + }, } - JSON_OUT = { - 'trends': trends, - 'okay': True, - 'responseTime': time.time() - now - } + JSON_OUT = {"trends": trends, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/issue/actors.py b/kibble/api/pages/issue/actors.py index edcbc541..29308fda 100644 --- a/kibble/api/pages/issue/actors.py +++ b/kibble/api/pages/issue/actors.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the issue actors stats page for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! 
@@ -83,165 +80,118 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - - interval = indata.get('interval', 'month') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'closed': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"closed": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Get timeseries for this period - query['aggs'] = { - 'per_interval': { - 'date_histogram': { - 'field': 'closedDate', - 'interval': interval - }, - 'aggs': { - 'by_user': { - 'cardinality': { - 'field': 'issueCloser' - } - } - } - } + query["aggs"] = { + "per_interval": { + "date_histogram": {"field": "closedDate", "interval": interval}, + "aggs": {"by_user": {"cardinality": {"field": "issueCloser"}}}, } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) timeseries = {} - for bucket in res['aggregations']['per_interval']['buckets']: - ts = int(bucket['key'] / 1000) - ccount = bucket['by_user']['value'] - timeseries[ts] = { - 'date': ts, - 'closers': ccount, - 'openers': 0 - } - + for bucket in res["aggregations"]["per_interval"]["buckets"]: + ts = int(bucket["key"] / 1000) + ccount = bucket["by_user"]["value"] + timeseries[ts] = {"date": ts, "closers": ccount, "openers": 0} #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] 
or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Get timeseries for this period - query['aggs'] = { - 'per_interval': { - 'date_histogram': { - 'field': 'createdDate', - 'interval': interval - }, - 'aggs': { - 'by_user': { - 'cardinality': { - 'field': 'issueCreator' - } - } - } - } + query["aggs"] = { + "per_interval": { + "date_histogram": {"field": "createdDate", "interval": interval}, + "aggs": {"by_user": {"cardinality": {"field": "issueCreator"}}}, } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) - for bucket in res['aggregations']['per_interval']['buckets']: - ts = int(bucket['key'] / 1000) - ccount = bucket['by_user']['value'] + for bucket in res["aggregations"]["per_interval"]["buckets"]: + ts = int(bucket["key"] / 1000) + ccount = bucket["by_user"]["value"] if ts in timeseries: - timeseries[ts]['openers'] = ccount + timeseries[ts]["openers"] = ccount else: - timeseries[ts] = { - 'date': ts, - 'closers': 0, - 'openers': ccount - } + timeseries[ts] = {"date": ts, "closers": 0, "openers": ccount} ts = [] for x, el in timeseries.items(): ts.append(el) JSON_OUT = { - 'timeseries': ts, - 'okay': True, - 'responseTime': time.time() - now, - 'widgetType': { - 'chartType': 'bar' - } + "timeseries": ts, + "okay": True, + "responseTime": time.time() - now, + "widgetType": {"chartType": "bar"}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/issue/age.py b/kibble/api/pages/issue/age.py index cafdaf49..d5d98691 100644 --- a/kibble/api/pages/issue/age.py +++ b/kibble/api/pages/issue/age.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the issue actors stats page for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! 
@@ -83,78 +80,58 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - interval = indata.get('interval', 'month') + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - { - 'term': { - 'status': 'open' - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [{"term": {"status": "open"}}, {"term": {"organisation": dOrg}}] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Get timeseries for this period - query['aggs'] = { - 'per_interval': { - 'date_histogram': { - 'field': 'createdDate', - 'interval': interval - } - } + query["aggs"] = { + "per_interval": { + "date_histogram": {"field": "createdDate", "interval": interval} } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) timeseries = [] opened = 0 - for bucket in res['aggregations']['per_interval']['buckets']: - ts = int(bucket['key'] / 1000) - opened += bucket['doc_count'] - timeseries.append( { - 'date': ts, - 'open': opened - }) - - + for bucket in res["aggregations"]["per_interval"]["buckets"]: + ts = int(bucket["key"] / 1000) + opened += bucket["doc_count"] + timeseries.append({"date": ts, "open": opened}) JSON_OUT = { - 'timeseries': timeseries, - 'okay': True, - 'responseTime': time.time() - now, - 'widgetType': { - 'chartType': 'line' - } + "timeseries": timeseries, + "okay": True, + "responseTime": time.time() - now, + "widgetType": {"chartType": "line"}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/issue/closers.py b/kibble/api/pages/issue/closers.py index 90bd56a4..94a409a7 100644 --- a/kibble/api/pages/issue/closers.py +++ b/kibble/api/pages/issue/closers.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN issue closers list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! @@ -83,102 +80,76 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - interval = indata.get('interval', 'month') + interval = indata.get("interval", "month") xtitle = None - #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'closed': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"closed": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['must'].append({'term': {'issueCreator': indata.get('email')}})
-        xTitle = "People closing %s's issues" % indata.get('email')
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"issueCreator": indata.get("email")}}
+        )
+        xtitle = "People closing %s's issues" % indata.get("email")
 
     # Get top 25 committers this period
-    query['aggs'] = {
-        'committers': {
-            'terms': {
-                'field': 'issueCloser',
-                'size': 25
-            },
-            'aggs': {
-
-            }
-        }
+    query["aggs"] = {
+        "committers": {"terms": {"field": "issueCloser", "size": 25}, "aggs": {}}
     }
 
     res = session.DB.ES.search(
-        index=session.DB.dbname,
-        doc_type="issue",
-        size = 0,
-        body = query
-    )
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
 
     people = {}
-    for bucket in res['aggregations']['committers']['buckets']:
-        email = bucket['key']
-        count = bucket['doc_count']
-        sha = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('utf-8') ).hexdigest()
-        if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha):
-            pres = session.DB.ES.get(
-                index=session.DB.dbname,
-                doc_type="person",
-                id = sha
-                )
-            person = pres['_source']
-            person['name'] = person.get('name', 'unknown')
+    for bucket in res["aggregations"]["committers"]["buckets"]:
+        email = bucket["key"]
+        count = bucket["doc_count"]
+        sha = hashlib.sha1(("%s%s" % (dOrg, email)).encode("utf-8")).hexdigest()
+        if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha):
+            pres = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id=sha)
+            person = pres["_source"]
+            person["name"] = person.get("name", "unknown")
             people[email] = person
-            people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest()
-            people[email]['count'] = count
+            people[email]["gravatar"] = hashlib.md5(
+                person.get("email", "unknown").encode("utf-8")
+            ).hexdigest()
+            people[email]["count"] = count
 
     topN = []
     for email, person in people.items():
         topN.append(person)
-    topN = sorted(topN, key = lambda x: x['count'], reverse = True)
+    topN = sorted(topN, key=lambda x: x["count"], reverse=True)
 
     JSON_OUT = {
-        'topN': {
-            'denoter': 'issues closed',
-            'items': topN,
-        },
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'bar',
-            'title': xtitle
-        }
+        "topN": {"denoter": "issues closed", "items": topN},
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "bar", "title": xtitle},
     }
     yield json.dumps(JSON_OUT)
diff --git a/kibble/api/pages/issue/issues.py b/kibble/api/pages/issue/issues.py
index b947cb1d..bdc6ae05 100644
--- a/kibble/api/pages/issue/issues.py
+++ b/kibble/api/pages/issue/issues.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.
See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the issue timeseries renderer for Kibble """ @@ -78,10 +74,11 @@ def makeTS(dist): ts = {} for k in dist: - ts[k + ' opened'] = 0 - ts[k + ' closed'] = 0 + ts[k + " opened"] = 0 + ts[k + " closed"] = 0 return ts + def run(API, environ, indata, session): # We need to be logged in for this! @@ -92,28 +89,24 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - interval = indata.get('interval', 'month') + interval = indata.get("interval", "month") # By default, we lump PRs and issues into the same category - distinct = { - 'issues': ['issue', 'pullrequest'] - } + distinct = {"issues": ["issue", "pullrequest"]} # If requested, we split them into two - if indata.get('distinguish', False): - distinct = { - 'issues': ['issue'], - 'pull requests': ['pullrequest'] - } + if indata.get("distinguish", False): + distinct = {"issues": ["issue"], "pull requests": ["pullrequest"]} timeseries = {} @@ -123,138 +116,104 @@ def run(API, environ, indata, session): #################################################################### # ISSUES OPENED # #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - }, - { - 'terms': { - 'issuetype': iValues - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + {"terms": {"issuetype": iValues}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'issueCreator': indata.get('email')}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append( + {"term": {"issueCreator": indata.get("email")}} + ) # Get number of opened ones, this period - query['aggs'] = { - 'commits': { - 'date_histogram': { - 'field': 'createdDate', - 'interval': interval - } - } + query["aggs"] = { + "commits": { + "date_histogram": {"field": "createdDate", "interval": interval} } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) - for bucket in res['aggregations']['commits']['buckets']: - ts = int(bucket['key'] / 1000) - count = bucket['doc_count'] + for bucket in res["aggregations"]["commits"]["buckets"]: + ts = int(bucket["key"] / 1000) + count = bucket["doc_count"] timeseries[ts] = timeseries.get(ts, makeTS(distinct)) - timeseries[ts][iType + ' opened'] = timeseries[ts].get(iType + ' opened', 0) + count - + timeseries[ts][iType + " opened"] = ( + timeseries[ts].get(iType + " opened", 0) + count + ) #################################################################### # ISSUES CLOSED # #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'closed': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - }, - { - 'terms': { - 'issuetype': iValues - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"closed": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + {"terms": {"issuetype": iValues}}, + ] } + } + } if viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'issueCloser': indata.get('email')}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) + if indata.get("email"): + query["query"]["bool"]["must"].append( + {"term": {"issueCloser": indata.get("email")}} + ) # Get number of closed ones, this period - query['aggs'] = { - 'commits': { - 'date_histogram': { - 'field': 'closedDate', - 'interval': interval - } - } - } + query["aggs"] = { + "commits": {"date_histogram": {"field": "closedDate", "interval": interval}} + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) - for bucket in res['aggregations']['commits']['buckets']: - ts = int(bucket['key'] / 1000) - count = bucket['doc_count'] + for bucket in res["aggregations"]["commits"]["buckets"]: + ts = int(bucket["key"] / 1000) + count = bucket["doc_count"] 
timeseries[ts] = timeseries.get(ts, makeTS(distinct)) - timeseries[ts][iType + ' closed'] = timeseries[ts].get(iType + ' closed', 0) + count + timeseries[ts][iType + " closed"] = ( + timeseries[ts].get(iType + " closed", 0) + count + ) ts = [] for k, v in timeseries.items(): - v['date'] = k + v["date"] = k ts.append(v) - JSON_OUT = { - 'widgetType': { - 'chartType': 'line', # Recommendation for the UI - 'nofill': True + "widgetType": { + "chartType": "line", # Recommendation for the UI + "nofill": True, }, - 'timeseries': ts, - 'interval': interval, - 'okay': True, - 'distinguishable': True, - 'responseTime': time.time() - now + "timeseries": ts, + "interval": interval, + "okay": True, + "distinguishable": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/issue/openers.py b/kibble/api/pages/issue/openers.py index 85500742..cee81abb 100644 --- a/kibble/api/pages/issue/openers.py +++ b/kibble/api/pages/issue/openers.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN issue openers list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! @@ -83,101 +80,76 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - interval = indata.get('interval', 'month') + interval = indata.get("interval", "month") xtitle = None #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'issueCloser': indata.get('email')}}) - xtitle = "People opening issues solved by %s" % indata.get('email') + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append( + {"term": {"issueCloser": indata.get("email")}} + ) + xtitle = "People opening issues solved by %s" % indata.get("email") # Get top 25 committers this period - query['aggs'] = { - 'committers': { - 'terms': { - 'field': 'issueCreator', - 'size': 25 - }, - 'aggs': { - - } - } + query["aggs"] = { + "committers": {"terms": {"field": "issueCreator", "size": 25}, "aggs": {}} } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) people = {} - for bucket in res['aggregations']['committers']['buckets']: - email = bucket['key'] - count = bucket['doc_count'] - sha = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('utf-8') ).hexdigest() - if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha): - pres = session.DB.ES.get( - index=session.DB.dbname, - doc_type="person", - id = sha - ) - person = pres['_source'] - person['name'] = person.get('name', 'unknown') + for bucket in res["aggregations"]["committers"]["buckets"]: + email = bucket["key"] + count = bucket["doc_count"] + sha = hashlib.sha1(("%s%s" % (dOrg, email)).encode("utf-8")).hexdigest() + if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha): + pres = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id=sha) + person = pres["_source"] + person["name"] = person.get("name", "unknown") people[email] = person - people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest() - people[email]['count'] = count + people[email]["gravatar"] = hashlib.md5( + person.get("email", "unknown").encode("utf-8") + ).hexdigest() + people[email]["count"] = count topN = [] for email, person in people.items(): topN.append(person) - topN = sorted(topN, key = lambda x: x['count'], reverse = True) + topN = sorted(topN, key=lambda x: x["count"], reverse=True) JSON_OUT = { - 'topN': { - 'denoter': 'issues opened', - 'items': topN, - }, - 'okay': True, - 'responseTime': time.time() - now, - 'widgetType': { - 'chartType': 'bar', - 'title': xtitle - } + "topN": {"denoter": "issues opened", "items": topN}, + "okay": True, + "responseTime": time.time() - now, + "widgetType": {"chartType": "bar", "title": xtitle}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/issue/pony-timeseries.py b/kibble/api/pages/issue/pony-timeseries.py index 22068ddd..1c1b2b8c 100644 --- a/kibble/api/pages/issue/pony-timeseries.py +++ b/kibble/api/pages/issue/pony-timeseries.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the pony factor renderer for Kibble """ @@ -75,6 +71,7 @@ import datetime import dateutil.relativedelta + def run(API, environ, indata, session): # We need to be logged in for this! @@ -85,13 +82,12 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - hl = indata.get('span', 24) + hl = indata.get("span", 24) tnow = datetime.date.today() nm = tnow.month - (tnow.month % 3) ny = tnow.year @@ -111,110 +107,86 @@ def run(API, environ, indata, session): nm += 12 ny = ny - 1 - #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': tf, - 'to': t - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": tf, "to": t}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get an initial count of commits - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="issue", - body = query - ) + res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query) - globcount = res['count'] + globcount = res["count"] if globcount == 0: break # Get top 25 committers this period - query['aggs'] = { - 'by_creator': { - 'terms': { - 'field': 'issueCreator', - 'size': 1000 - } - }, - 'by_closer': { - 'terms': { - 'field': 'issueCloser', - 'size': 1000 - } - } - } + query["aggs"] = { + "by_creator": {"terms": {"field": "issueCreator", "size": 1000}}, + "by_closer": {"terms": {"field": "issueCloser", "size": 1000}}, + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) cpf = {} # PF for openers pf_opener = 0 pf_opener_count = 0 - for bucket in res['aggregations']['by_creator']['buckets']: - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_creator"]["buckets"]: + count = bucket["doc_count"] pf_opener += 1 pf_opener_count += count - if '@' in bucket['key']: - mldom = bucket['key'].lower().split('@')[-1] + if "@" in bucket["key"]: + mldom = bucket["key"].lower().split("@")[-1] cpf[mldom] = True - if pf_opener_count > int(globcount/2): + if pf_opener_count > int(globcount / 2): break # PF for closer pf_closer = 0 pf_closer_count = 0 - for bucket in 
res['aggregations']['by_closer']['buckets']:
-            count = bucket['doc_count']
+        for bucket in res["aggregations"]["by_closer"]["buckets"]:
+            count = bucket["doc_count"]
             pf_closer += 1
             pf_closer_count += count
-            if '@' in bucket['key']:
-                mldom = bucket['key'].lower().split('@')[-1]
+            if "@" in bucket["key"]:
+                mldom = bucket["key"].lower().split("@")[-1]
                 cpf[mldom] = True
-            if pf_closer_count > int(globcount/2):
+            if pf_closer_count > int(globcount / 2):
                 break
 
-        ts.append({
-            'date': t,
-            'Pony Factor (openers)': pf_opener,
-            'Pony Factor (closers)': pf_closer,
-            'Meta-Pony Factor': len(cpf)
-        })
+        ts.append(
+            {
+                "date": t,
+                "Pony Factor (openers)": pf_opener,
+                "Pony Factor (closers)": pf_closer,
+                "Meta-Pony Factor": len(cpf),
+            }
+        )
 
-    ts = sorted(ts, key = lambda x: x['date'])
+    ts = sorted(ts, key=lambda x: x["date"])
 
     JSON_OUT = {
-        'text': "This shows Pony Factors as calculated over a %u month timespan. Openers measures the people submitting the bulk of the issues, closers mesaures the people closing (resolving) the issues, and meta-pony is an estimation of how many organisations/companies are involved." % hl,
-        'timeseries': ts,
-        'okay': True,
-        'responseTime': time.time() - now,
+        "text": "This shows Pony Factors as calculated over a %u month timespan. Openers measures the people submitting the bulk of the issues, closers measures the people closing (resolving) the issues, and meta-pony is an estimation of how many organisations/companies are involved."
+        % hl,
+        "timeseries": ts,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
diff --git a/kibble/api/pages/issue/relationships.py b/kibble/api/pages/issue/relationships.py
index eb8fb767..3b4985b8 100644
--- a/kibble/api/pages/issue/relationships.py
+++ b/kibble/api/pages/issue/relationships.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
-
-
-
 """
 This is the issue tracker relationship list renderer for Kibble
 """
 
@@ -76,6 +72,7 @@ import re
 import math
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
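
pony-timeseries.py above computes a "pony factor": the smallest number of people whose combined activity covers more than half of all activity in the window. A minimal sketch of that idea, assuming `buckets` is sorted by doc_count descending (which is how ES terms aggregations return them):

    def pony_factor(buckets, total):
        # Smallest number of people accounting for >50% of `total` items
        covered = 0
        for pf, bucket in enumerate(buckets, start=1):
            covered += bucket["doc_count"]
            if covered > total / 2:
                return pf
        return len(buckets)

    # pony_factor([{"doc_count": 60}, {"doc_count": 30}, {"doc_count": 10}], 100) == 1
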
@@ -86,71 +83,56 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - which = 'committer_email' - role = 'committer' - if indata.get('author', False): - which = 'author_email' - role = 'author' + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - interval = indata.get('interval', 'day') + which = "committer_email" + role = "committer" + if indata.get("author", False): + which = "author_email" + role = "author" + interval = indata.get("interval", "day") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'closed': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"closed": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Get number of commits, this period, per repo - query['aggs'] = { - 'per_repo': { - 'terms': { - 'field': 'sourceID', - 'size': 10000 - } - } - } + query["aggs"] = {"per_repo": {"terms": {"field": "sourceID", "size": 10000}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) repos = {} repo_commits = {} @@ -161,38 +143,25 @@ def run(API, environ, indata, session): max_authors = 0 # For each repo, count commits and gather data on authors - for doc in res['aggregations']['per_repo']['buckets']: - sourceID = doc['key'] - commits = doc['doc_count'] + for doc in res["aggregations"]["per_repo"]["buckets"]: + sourceID = doc["key"] + commits = doc["doc_count"] # Gather the unique authors/committers - query['aggs'] = { - 'per_closer': { - 'terms': { - 'field': 'issueCloser', - 'size': 10000 - } - }, - 'per_creator': { - 'terms': { - 'field': 'issueCreator', - 'size': 10000 - } - } + 
query["aggs"] = { + "per_closer": {"terms": {"field": "issueCloser", "size": 10000}}, + "per_creator": {"terms": {"field": "issueCreator", "size": 10000}}, } xquery = copy.deepcopy(query) - xquery['query']['bool']['must'].append({'term': {'sourceID': sourceID}}) + xquery["query"]["bool"]["must"].append({"term": {"sourceID": sourceID}}) xres = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = xquery + index=session.DB.dbname, doc_type="issue", size=0, body=xquery ) authors = [] - for person in xres['aggregations']['per_closer']['buckets']: - authors.append(person['key']) - for person in xres['aggregations']['per_creator']['buckets']: - authors.append(person['key']) + for person in xres["aggregations"]["per_closer"]["buckets"]: + authors.append(person["key"]) + for person in xres["aggregations"]["per_creator"]["buckets"]: + authors.append(person["key"]) if commits > max_commits: max_commits = commits repos[sourceID] = authors @@ -203,14 +172,16 @@ def run(API, environ, indata, session): repo_notoriety = {} repodatas = {} repo_authors = {} - minLinks = indata.get('links', 1) + minLinks = indata.get("links", 1) # Grab data of all sources for ID, repo in repos.items(): mylinks = {} - if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID): + if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id=ID): continue - repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID) + repodatas[ID] = session.DB.ES.get( + index=session.DB.dbname, doc_type="source", id=ID + ) for ID, repo in repos.items(): mylinks = {} @@ -218,49 +189,59 @@ def run(API, environ, indata, session): continue repodata = repodatas[ID] oID = ID - if indata.get('collapse'): - m = re.search(indata.get('collapse'), repodata['_source']['sourceURL']) + if indata.get("collapse"): + m = re.search(indata.get("collapse"), repodata["_source"]["sourceURL"]) if m: ID = m.group(1) else: - ID = re.sub(r"^.+/", "", repodata['_source']['sourceURL']) + ID = re.sub(r"^.+/", "", repodata["_source"]["sourceURL"]) for xID, xrepo in repos.items(): if xID in repodatas: xrepodata = repodatas[xID] - if indata.get('collapse'): - m = re.search(indata.get('collapse'), xrepodata['_source']['sourceURL']) + if indata.get("collapse"): + m = re.search( + indata.get("collapse"), xrepodata["_source"]["sourceURL"] + ) if m: xID = m.group(1) else: - xID = re.sub(r"^.+/", "", xrepodata['_source']['sourceURL']) + xID = re.sub(r"^.+/", "", xrepodata["_source"]["sourceURL"]) if xID != ID: xlinks = [] for author in xrepo: if author in repo: xlinks.append(author) - lname = "%s@%s" % (ID, xID) # Link name - rname = "%s@%s" % (xID, ID) # Reverse link name + lname = "%s@%s" % (ID, xID) # Link name + rname = "%s@%s" % (xID, ID) # Reverse link name if len(xlinks) >= minLinks and not rname in repo_links: mylinks[xID] = len(xlinks) - repo_links[lname] = repo_links.get(lname, 0) + len(xlinks) # How many contributors in common between project A and B? + repo_links[lname] = repo_links.get(lname, 0) + len( + xlinks + ) # How many contributors in common between project A and B? if repo_links[lname] > max_shared: max_shared = repo_links[lname] if ID not in repo_notoriety: repo_notoriety[ID] = set() - repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to? + repo_notoriety[ID].update( + mylinks.keys() + ) # How many projects is this repo connected to? 
         if ID not in repo_authors:
             repo_authors[ID] = set()
-        repo_authors[ID].update(repo) # How many projects is this repo connected to?
+        repo_authors[ID].update(repo)  # All contributors to this repo
 
         if ID != oID:
             repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID]
             if repo_commits[ID] > max_commits:
-                max_commits = repo_commits[ID] # Used for calculating max link thickness
+                max_commits = repo_commits[
+                    ID
+                ]  # Used for calculating max link thickness
         if len(repo_notoriety[ID]) > max_links:
             max_links = len(repo_notoriety[ID])
         if len(repo_authors[ID]) > max_authors:
-            max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts
+            max_authors = len(
+                repo_authors[ID]
+            )  # Used for calculating max sphere size in charts
 
     # Now, pull it all together!
     nodes = []
@@ -269,45 +250,44 @@ def run(API, environ, indata, session):
     for sourceID in repo_notoriety.keys():
         lsize = 0
         for k in repo_links.keys():
-            fr, to = k.split('@')
+            fr, to = k.split("@")
             if fr == sourceID or to == sourceID:
                 lsize += 1
         asize = len(repo_authors[sourceID])
         doc = {
-            'id': sourceID,
-            'name': sourceID,
-            'issues': repo_commits[sourceID],
-            'authors': asize,
-            'links': lsize,
-            'size': max(5, (1 - abs(math.log10(asize / max_authors))) * 45),
-            'tooltip': "%u connections, %u contributors, %u issues" % (lsize, asize, repo_commits[sourceID])
+            "id": sourceID,
+            "name": sourceID,
+            "issues": repo_commits[sourceID],
+            "authors": asize,
+            "links": lsize,
+            "size": max(5, (1 - abs(math.log10(asize / max_authors))) * 45),
+            "tooltip": "%u connections, %u contributors, %u issues"
+            % (lsize, asize, repo_commits[sourceID]),
         }
         nodes.append(doc)
         existing_repos.append(sourceID)
 
     for k, s in repo_links.items():
         size = s
-        fr, to = k.split('@')
+        fr, to = k.split("@")
         if fr in existing_repos and to in existing_repos:
             doc = {
-                'source': fr,
-                'target': to,
-                'value': max(1, (size/max_shared) * 8),
-                'name': "%s ↔ %s" % (fr, to),
-                'tooltip': "%u contributors in common" % size
+                "source": fr,
+                "target": to,
+                "value": max(1, (size / max_shared) * 8),
+                "name": "%s ↔ %s" % (fr, to),
+                "tooltip": "%u contributors in common" % size,
             }
             links.append(doc)
 
     JSON_OUT = {
-        'maxLinks': max_links,
-        'maxShared': max_shared,
-        'widgetType': {
-            'chartType': 'link' # Recommendation for the UI
-        },
-        'links': links,
-        'nodes': nodes,
-        'interval': interval,
-        'okay': True,
-        'responseTime': time.time() - now
+        "maxLinks": max_links,
+        "maxShared": max_shared,
+        "widgetType": {"chartType": "link"},  # Recommendation for the UI
+        "links": links,
+        "nodes": nodes,
+        "interval": interval,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
diff --git a/kibble/api/pages/issue/retention.py b/kibble/api/pages/issue/retention.py
index 1cdecfd8..3766e67d 100644
--- a/kibble/api/pages/issue/retention.py
+++ b/kibble/api/pages/issue/retention.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -64,9 +63,6 @@
 ########################################################################
 
-
-
-
 """
 This is the code contributor retention factor renderer for Kibble
 """
 
@@ -76,6 +72,7 @@ import re
 import datetime
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
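
relationships.py above links two sources whenever their contributor sets intersect; the link weight is the size of that intersection, and the "A@B" key format keeps each unordered pair unique. A rough standalone sketch of the core idea, with made-up repo/author data:

    # Invented fixture data for illustration only
    repos = {
        "kibble": {"alice@a.org", "bob@b.org"},
        "airflow": {"bob@b.org", "carol@c.org"},
    }
    links = {}
    for a, a_authors in repos.items():
        for b, b_authors in repos.items():
            if a < b:  # visit each unordered pair once
                shared = a_authors & b_authors
                if shared:
                    links["%s@%s" % (a, b)] = len(shared)
    # links == {"airflow@kibble": 1}  (bob is the shared contributor)
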
@@ -86,13 +83,14 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    hl = indata.get('span', 12) # By default, we define a contributor as active if having committer in the past year
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    hl = indata.get(
+        "span", 12
+    )  # By default, we define a contributor as active if they have been active in the past year
     tnow = datetime.date.today()
     nm = tnow.month - (tnow.month % 3)
     ny = tnow.year
@@ -109,7 +107,7 @@ def run(API, environ, indata, session):
 
     FoundSomething = False
     ny = 1970
-    while ny < cy or (ny == cy and (nm+3) <= tnow.month):
+    while ny < cy or (ny == cy and (nm + 3) <= tnow.month):
         d = datetime.date(ny, nm, 1)
         t = time.mktime(d.timetuple())
         nm += 3
@@ -123,76 +121,49 @@ def run(API, environ, indata, session):
 
         ####################################################################
         ####################################################################
-        dOrg = session.user['defaultOrganisation'] or "apache"
+        dOrg = session.user["defaultOrganisation"] or "apache"
         query = {
-            'query': {
-                'bool': {
-                    'must': [
-                        {'range':
-                            {
-                                'closed': {
-                                    'from': t,
-                                    'to': tf
-                                }
-                            }
-                        },
-                        {
-                            'term': {
-                                'organisation': dOrg
-                            }
-                        }
-                    ]
-                }
-            }
+            "query": {
+                "bool": {
+                    "must": [
+                        {"range": {"closed": {"from": t, "to": tf}}},
+                        {"term": {"organisation": dOrg}},
+                    ]
                 }
+            }
+        }
         # Source-specific or view-specific??
-        if indata.get('source'):
-            query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+        if indata.get("source"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"sourceID": indata.get("source")}}
+            )
         elif viewList:
-            query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+            query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
         # Get an initial count of commits
-        res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="issue",
-            body = query
-        )
+        res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
 
-        globcount = res['count']
+        globcount = res["count"]
         if globcount == 0 and FoundSomething == False:
             continue
         FoundSomething = True
 
         # Get top 1000 committers this period
-        query['aggs'] = {
-            'by_o': {
-                'terms': {
-                    'field': 'issueCloser',
-                    'size': 50000
-                }
-            },
-            'by_c': {
-                'terms': {
-                    'field': 'issueCreator',
-                    'size': 50000
-                }
-            }
-        }
+        query["aggs"] = {
+            "by_o": {"terms": {"field": "issueCloser", "size": 50000}},
+            "by_c": {"terms": {"field": "issueCreator", "size": 50000}},
+        }
 
         res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
-
+            index=session.DB.dbname, doc_type="issue", size=0, body=query
+        )
 
         retained = 0
         added = 0
         lost = 0
 
         thisPeriod = []
-        for bucket in res['aggregations']['by_o']['buckets']:
-            who = bucket['key']
+        for bucket in res["aggregations"]["by_o"]["buckets"]:
+            who = bucket["key"]
             thisPeriod.append(who)
             if who not in peopleSeen:
                 peopleSeen[who] = tf
@@ -201,8 +172,8 @@ def run(API, environ, indata, session):
             if who not in allPeople:
                 allPeople[who] = tf
 
-        for bucket in res['aggregations']['by_c']['buckets']:
-            who = bucket['key']
+        for bucket in res["aggregations"]["by_c"]["buckets"]:
+            who = bucket["key"]
thisPeriod.append(who) if who not in peopleSeen: peopleSeen[who] = tf @@ -214,7 +185,7 @@ def run(API, environ, indata, session): prune = [] for k, v in activePeople.items(): - if v < (t - (hl*30.45*86400)): + if v < (t - (hl * 30.45 * 86400)): prune.append(k) lost += 1 @@ -222,46 +193,49 @@ def run(API, environ, indata, session): del activePeople[who] del peopleSeen[who] retained = len(activePeople) - added - ts.append({ - 'date': tf, - 'People who (re)joined': added, - 'People who quit': lost, - 'People retained': retained, - 'Active people': added + retained - }) + ts.append( + { + "date": tf, + "People who (re)joined": added, + "People who quit": lost, + "People retained": retained, + "Active people": added + retained, + } + ) groups = [ - ['More than 5 years', (5*365*86400)+1], - ['2 - 5 years', (2*365*86400)+1], - ['1 - 2 years', (365*86400)], - ['Less than a year', 1] + ["More than 5 years", (5 * 365 * 86400) + 1], + ["2 - 5 years", (2 * 365 * 86400) + 1], + ["1 - 2 years", (365 * 86400)], + ["Less than a year", 1], ] counts = {} totExp = 0 for person, age in activePeople.items(): totExp += time.time() - allPeople[person] - for el in sorted(groups, key = lambda x: x[1], reverse = True): + for el in sorted(groups, key=lambda x: x[1], reverse=True): if allPeople[person] <= time.time() - el[1]: counts[el[0]] = counts.get(el[0], 0) + 1 break - avgyr = (totExp / (86400*365)) / max(len(activePeople),1) + avgyr = (totExp / (86400 * 365)) / max(len(activePeople), 1) - ts = sorted(ts, key = lambda x: x['date']) + ts = sorted(ts, key=lambda x: x["date"]) avgm = "" yr = int(avgyr) - ym = round((avgyr-yr)*12) + ym = round((avgyr - yr) * 12) if yr >= 1: avgm += "%u year%s" % (yr, "s" if yr != 1 else "") if ym > 0: avgm += "%s%u month%s" % (", " if yr > 0 else "", ym, "s" if ym != 1 else "") JSON_OUT = { - 'text': "This shows Contributor retention as calculated over a %u month timespan. The average experience of currently active people is %s." % (hl, avgm), - 'timeseries': ts, - 'counts': counts, - 'averageYears': avgyr, - 'okay': True, - 'responseTime': time.time() - now, + "text": "This shows Contributor retention as calculated over a %u month timespan. The average experience of currently active people is %s." + % (hl, avgm), + "timeseries": ts, + "counts": counts, + "averageYears": avgyr, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/issue/top-count.py b/kibble/api/pages/issue/top-count.py index 4e3f5ae9..38273cee 100644 --- a/kibble/api/pages/issue/top-count.py +++ b/kibble/api/pages/issue/top-count.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN repos by commits list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import re + def run(API, environ, indata, session): # We need to be logged in for this! 
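
retention.py above ends by rendering the average tenure of active contributors as text. The fractional-years arithmetic is easy to get wrong at the edges, so here it is in isolation; this mirrors the reformatted code:

    def tenure_text(avgyr):
        # e.g. 2.26 years -> "2 years, 3 months"
        yr = int(avgyr)
        ym = round((avgyr - yr) * 12)
        out = ""
        if yr >= 1:
            out += "%u year%s" % (yr, "s" if yr != 1 else "")
        if ym > 0:
            out += "%s%u month%s" % (", " if yr > 0 else "", ym, "s" if ym != 1 else "")
        return out
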
@@ -83,78 +80,61 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [ - {'term': {'issueCreator': indata.get('email')}}, - {'term': {'issueCloser': indata.get('email')}} + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, ] - query['query']['bool']['minimum_should_match'] = 1 - + query["query"]["bool"]["minimum_should_match"] = 1 # Get top 25 committers this period - query['aggs'] = { - 'by_repo': { - 'terms': { - 'field': 'sourceID', - 'size': 5000 - } - } - } + query["aggs"] = {"by_repo": {"terms": {"field": "sourceID", "size": 5000}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) toprepos = [] - for bucket in res['aggregations']['by_repo']['buckets']: - ID = bucket['key'] - if session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID): - it = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID)['_source'] - repo = re.sub(r".+/([^/]+)$", r"\1", it['sourceURL']) - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_repo"]["buckets"]: + ID = bucket["key"] + if session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id=ID): + it = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id=ID)[ + "_source" + ] + repo = re.sub(r".+/([^/]+)$", r"\1", it["sourceURL"]) + count = bucket["doc_count"] toprepos.append([repo, count]) - toprepos = sorted(toprepos, key = lambda x: x[1], reverse = True) + toprepos = sorted(toprepos, key=lambda x: x[1], reverse=True) top = toprepos[0:24] if len(toprepos) > 25: count = 0 @@ -166,9 +146,5 @@ def run(API, environ, indata, 
session): for v in top: tophash[v[0]] = v[1] - JSON_OUT = { - 'counts': tophash, - 'okay': True, - 'responseTime': time.time() - now, - } + JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/issue/top.py b/kibble/api/pages/issue/top.py index 87a05f1b..70fae4d7 100644 --- a/kibble/api/pages/issue/top.py +++ b/kibble/api/pages/issue/top.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the issue actors stats page for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! @@ -83,80 +80,62 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - interval = indata.get('interval', 'month') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - }, - 'sort': { - 'comments': 'desc' - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + }, + "sort": {"comments": "desc"}, + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 25, - body = query - ) + index=session.DB.dbname, doc_type="issue", size=25, body=query + ) top = [] - for bucket in res['hits']['hits']: - doc = bucket['_source'] - doc['source'] = doc.get('url', '#') - doc['name'] = doc.get('key', 'unknown') - doc['subject'] = doc.get('title') - doc['count'] = doc.get('comments', 0) + for bucket in res["hits"]["hits"]: + doc = bucket["_source"] + doc["source"] = doc.get("url", "#") + doc["name"] = doc.get("key", "unknown") + doc["subject"] = doc.get("title") + doc["count"] = doc.get("comments", 0) top.append(doc) - JSON_OUT = { - 'topN': { - 'denoter': 'interactions', - 'icon': 'bug', - 'items': top - }, - 'okay': True, - 'responseTime': time.time() - now, - 'widgetType': { - 'chartType': 'line' - } + "topN": {"denoter": "interactions", "icon": "bug", "items": top}, + "okay": True, + "responseTime": time.time() - now, + "widgetType": {"chartType": "line"}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/issue/trends.py b/kibble/api/pages/issue/trends.py index 61c528ba..f43971ee 100644 --- a/kibble/api/pages/issue/trends.py +++ b/kibble/api/pages/issue/trends.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the Issue trends renderer for Kibble """ @@ -72,6 +68,7 @@ import json import time + def run(API, environ, indata, session): # We need to be logged in for this! 
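
The main hunk of trends.py, which follows, compares the selected window against a prior window of equal length; every query runs twice, once per window. The boundary arithmetic in isolation, using plain epoch seconds as the code does:

    import time

    date_to = int(time.time())
    date_from = date_to - 86400 * 30 * 6             # default: roughly six months back
    date_yonder = date_from - (date_to - date_from)  # prior window of the same length

    # current window: [date_from, date_to]
    # prior window:   [date_yonder, date_from - 1]
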
@@ -82,20 +79,20 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" #################################################################### # We start by doing all the queries for THIS period. # @@ -103,257 +100,164 @@ def run(API, environ, indata, session): # and rerun the same queries. # #################################################################### query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Get number of issues created, this period - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="issue", - body = query - ) - no_issues_created = res['count'] - + res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query) + no_issues_created = res["count"] # Get number of open/close, this period - query['aggs'] = { - 'opener': { - 'cardinality': { - 'field': 'issueCreator' - } - } - } + query["aggs"] = {"opener": {"cardinality": {"field": "issueCreator"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) - no_creators = res['aggregations']['opener']['value'] - + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) + no_creators = res["aggregations"]["opener"]["value"] # CLOSERS query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'closed': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"closed": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } 
+ } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Get number of issues created, this period - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="issue", - body = query - ) - no_issues_closed = res['count'] - + res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query) + no_issues_closed = res["count"] # Get number of open/close, this period - query['aggs'] = { - 'closer': { - 'cardinality': { - 'field': 'issueCloser' - } - } - } + query["aggs"] = {"closer": {"cardinality": {"field": "issueCloser"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) - no_closers = res['aggregations']['closer']['value'] - - + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) + no_closers = res["aggregations"]["closer"]["value"] #################################################################### # Change to PRIOR SPAN # #################################################################### query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'created': { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"created": {"from": dateYonder, "to": dateFrom - 1}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } if viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Get number of issues, this period - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="issue", - body = query - ) - no_issues_created_before = res['count'] + res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query) + no_issues_created_before = res["count"] # Get number of committers, this period - query['aggs'] = { - 'opener': { - 'cardinality': { - 'field': 'issueCreator' - } - } - } + query["aggs"] = {"opener": {"cardinality": {"field": "issueCreator"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) - no_creators_before = res['aggregations']['opener']['value'] - - + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) + no_creators_before = 
res["aggregations"]["opener"]["value"] # CLOSERS query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'closed': { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"closed": {"from": dateYonder, "to": dateFrom - 1}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } if viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [ + {"term": {"issueCreator": indata.get("email")}}, + {"term": {"issueCloser": indata.get("email")}}, + ] + query["query"]["bool"]["minimum_should_match"] = 1 # Get number of issues created, this period - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="issue", - body = query - ) - no_issues_closed_before = res['count'] - + res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query) + no_issues_closed_before = res["count"] # Get number of open/close, this period - query['aggs'] = { - 'closer': { - 'cardinality': { - 'field': 'issueCloser' - } - } - } + query["aggs"] = {"closer": {"cardinality": {"field": "issueCloser"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="issue", - size = 0, - body = query - ) - no_closers_before = res['aggregations']['closer']['value'] - + index=session.DB.dbname, doc_type="issue", size=0, body=query + ) + no_closers_before = res["aggregations"]["closer"]["value"] trends = { "created": { - 'before': no_issues_created_before, - 'after': no_issues_created, - 'title': "Issues opened this period" + "before": no_issues_created_before, + "after": no_issues_created, + "title": "Issues opened this period", }, "authors": { - 'before': no_creators_before, - 'after': no_creators, - 'title': "People opening issues this period" + "before": no_creators_before, + "after": no_creators, + "title": "People opening issues this period", }, "closed": { - 'before': no_issues_closed_before, - 'after': no_issues_closed, - 'title': "Issues closed this period" + "before": no_issues_closed_before, + "after": no_issues_closed, + "title": "Issues closed this period", }, "closers": { - 'before': no_closers_before, - 'after': no_closers, - 'title': "People closing issues this period" - } + "before": no_closers_before, + "after": no_closers, + "title": "People closing issues this period", + }, } - JSON_OUT = { - 'trends': trends, - 'okay': True, - 'responseTime': time.time() - now - } + JSON_OUT = {"trends": trends, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/keyphrases.py b/kibble/api/pages/mail/keyphrases.py index ed03282b..ead6d4bf 100644 --- a/kibble/api/pages/mail/keyphrases.py +++ b/kibble/api/pages/mail/keyphrases.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the common key phrases renderer for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! @@ -83,76 +80,52 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - - interval = indata.get('interval', 'month') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'ts': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - }, - 'aggs': { - 'kpe': { - 'terms': { - 'field': 'kpe.keyword', - 'size': 50 - } - } - } + "query": { + "bool": { + "must": [ + {"range": {"ts": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + }, + "aggs": {"kpe": {"terms": {"field": "kpe.keyword", "size": 50}}}, + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="email", size=0, body=query + ) topN = [] - for bucket in res['aggregations']['kpe']['buckets']: - topN.append( { - 'phrase': bucket['key'], - 'count': bucket['doc_count'] - }) + for bucket in res["aggregations"]["kpe"]["buckets"]: + topN.append({"phrase": bucket["key"], "count": bucket["doc_count"]}) JSON_OUT = { - 'widgetType': { - 'chartType': 'bar' - }, - 'phrases': topN, - 'okay': True, - 'responseTime': time.time() - now + "widgetType": {"chartType": "bar"}, + "phrases": topN, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/map.py b/kibble/api/pages/mail/map.py index 14f7170f..ba188ea1 100644 --- a/kibble/api/pages/mail/map.py +++ b/kibble/api/pages/mail/map.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the committer relationship list renderer for Kibble """ @@ -78,6 +74,7 @@ badBots = r"(JIRA|Hudson|jira|jenkins|GitHub|git@|dev@|bugzilla|gerrit)" + def run(API, environ, indata, session): # We need to be logged in for this! @@ -88,69 +85,61 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span span = dateTo - dateFrom #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'ts': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"ts": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
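+    # An explicit source parameter takes precedence below; failing that, the
+    # user's selected view narrows the query to its sourceIDs, and otherwise
+    # the whole organisation is searched.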
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('search'): - query['query']['bool']['must'].append({'regexp': {'subject': indata.get('search')}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("search"): + query["query"]["bool"]["must"].append( + {"regexp": {"subject": indata.get("search")}} + ) - if indata.get('email'): - query['query']['bool']['minimum_should_match'] = 1 - query['query']['bool']['should'] = [ - {'term': {'replyto.keyword': indata.get('email')}}, - {'term': {'sender': indata.get('email')}}, - ] + if indata.get("email"): + query["query"]["bool"]["minimum_should_match"] = 1 + query["query"]["bool"]["should"] = [ + {"term": {"replyto.keyword": indata.get("email")}}, + {"term": {"sender": indata.get("email")}}, + ] # Get number of commits, this period, per repo - query['aggs'] = { - 'per_ml': { - 'terms': { - 'field': 'replyto.keyword' if not indata.get('author') else 'sender', - 'size': 150 - } + query["aggs"] = { + "per_ml": { + "terms": { + "field": "replyto.keyword" if not indata.get("author") else "sender", + "size": 150, } } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="email", size=0, body=query + ) repos = {} repo_commits = {} @@ -159,43 +148,49 @@ def run(API, environ, indata, session): max_links = 0 max_shared = 0 max_authors = 0 - minLinks = indata.get('links', 1) + minLinks = indata.get("links", 1) - if indata.get('email'): - del query['query']['bool']['should'] - del query['query']['bool']['minimum_should_match'] + if indata.get("email"): + del query["query"]["bool"]["should"] + del query["query"]["bool"]["minimum_should_match"] # For each repo, count commits and gather data on authors - for doc in res['aggregations']['per_ml']['buckets']: - sourceID = doc['key'] - emails = doc['doc_count'] - if re.search(badBots, sourceID): # No bots + for doc in res["aggregations"]["per_ml"]["buckets"]: + sourceID = doc["key"] + emails = doc["doc_count"] + if re.search(badBots, sourceID): # No bots continue - if emails > (span/86400)*4: # More than 4/day and we consider you a bot! + if emails > (span / 86400) * 4: # More than 4/day and we consider you a bot! 
continue - # Gather the unique authors/committers - query['aggs'] = { - 'per_ml': { - 'terms': { - 'field': 'sender' if not indata.get('author') else 'replyto.keyword', - 'size': 5000 + query["aggs"] = { + "per_ml": { + "terms": { + "field": "sender" + if not indata.get("author") + else "replyto.keyword", + "size": 5000, } } } xquery = copy.deepcopy(query) - xquery['query']['bool']['must'].append({'term': {'replyto.keyword' if not indata.get('author') else 'sender': sourceID}}) + xquery["query"]["bool"]["must"].append( + { + "term": { + "replyto.keyword" + if not indata.get("author") + else "sender": sourceID + } + } + ) xres = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = xquery + index=session.DB.dbname, doc_type="email", size=0, body=xquery ) authors = [] - for person in xres['aggregations']['per_ml']['buckets']: - pk = person['key'] + for person in xres["aggregations"]["per_ml"]["buckets"]: + pk = person["key"] authors.append(pk) if emails > max_emails: max_emails = emails @@ -211,10 +206,14 @@ def run(API, environ, indata, session): # Grab data of all sources for ID, repo in repos.items(): mylinks = {} - hID = hashlib.sha1( ("%s%s" % (dOrg, ID)).encode('ascii', errors='replace')).hexdigest() - if not session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id = hID): + hID = hashlib.sha1( + ("%s%s" % (dOrg, ID)).encode("ascii", errors="replace") + ).hexdigest() + if not session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=hID): continue - repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id = hID) + repodatas[ID] = session.DB.ES.get( + index=session.DB.dbname, doc_type="person", id=hID + ) for ID, repo in repos.items(): mylinks = {} @@ -222,47 +221,57 @@ def run(API, environ, indata, session): continue repodata = repodatas[ID] oID = ID - if indata.get('collapse'): - m = re.search(indata.get('collapse'), repodata['_source']['email']) + if indata.get("collapse"): + m = re.search(indata.get("collapse"), repodata["_source"]["email"]) if m: ID = m.group(1) xlinks = [] for xID, xrepo in repos.items(): if xID in repodatas: xrepodata = repodatas[xID] - if indata.get('collapse'): - m = re.search(indata.get('collapse'), xrepodata['_source']['email']) + if indata.get("collapse"): + m = re.search(indata.get("collapse"), xrepodata["_source"]["email"]) if m: xID = m.group(1) if xID != ID: if ID in xrepo: xlinks.append(xID) - lname = "%s||%s" % (ID, xID) # Link name - rname = "%s||%s" % (xID, ID) # Reverse link name - if len(xlinks) > 0 and rname not in repo_links and len(xlinks) >= minLinks: + lname = "%s||%s" % (ID, xID) # Link name + rname = "%s||%s" % (xID, ID) # Reverse link name + if ( + len(xlinks) > 0 + and rname not in repo_links + and len(xlinks) >= minLinks + ): mylinks[ID] = mylinks.get(ID, 0) + 1 - repo_links[lname] = repo_links.get(lname, 0) + len(xlinks) # How many contributors in common between project A and B? + repo_links[lname] = repo_links.get(lname, 0) + len( + xlinks + ) # How many contributors in common between project A and B? if repo_links[lname] > max_shared: max_shared = repo_links[lname] elif rname in repo_links: repo_links[rname] = repo_links.get(rname, 0) + len(xlinks) if ID not in repo_notoriety: repo_notoriety[ID] = set() - repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to? + repo_notoriety[ID].update( + mylinks.keys() + ) # How many projects is this repo connected to? 
if ID not in repo_authors: repo_authors[ID] = set() - repo_authors[ID].update(repo) # How many projects is this repo connected to? + repo_authors[ID].update(repo) # How many projects is this repo connected to? if ID != oID: repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID] if repo_commits[ID] > max_emails: - max_emails = repo_commits[ID] # Used for calculating max link thickness + max_emails = repo_commits[ID] # Used for calculating max link thickness if len(repo_notoriety[ID]) > max_links: max_links = len(repo_notoriety[ID]) if len(repo_authors[ID]) > max_authors: - max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts + max_authors = len( + repo_authors[ID] + ) # Used for calculating max sphere size in charts # Now, pull it all together! nodes = [] @@ -271,45 +280,46 @@ def run(API, environ, indata, session): for sourceID, ns in repo_notoriety.items(): lsize = 0 for k in repo_links.keys(): - fr, to = k.split('||') + fr, to = k.split("||") if fr == sourceID or to == sourceID: lsize += 1 asize = len(repo_authors[sourceID]) doc = { - 'id': sourceID, - 'gravatar': hashlib.md5(sourceID.lower().encode('utf-8')).hexdigest(), - 'name': repodatas[sourceID]['_source'].get('name', sourceID), - 'replies': repo_commits[sourceID], - 'authors': asize, - 'links': lsize, - 'size': max(5, (1 - abs(math.log10(repo_commits[sourceID] / max_emails))) * 45), - 'tooltip': "%u connections, %u fellows, %u replies to" % (lsize, asize, repo_commits[sourceID]) + "id": sourceID, + "gravatar": hashlib.md5(sourceID.lower().encode("utf-8")).hexdigest(), + "name": repodatas[sourceID]["_source"].get("name", sourceID), + "replies": repo_commits[sourceID], + "authors": asize, + "links": lsize, + "size": max( + 5, (1 - abs(math.log10(repo_commits[sourceID] / max_emails))) * 45 + ), + "tooltip": "%u connections, %u fellows, %u replies to" + % (lsize, asize, repo_commits[sourceID]), } nodes.append(doc) existing_repos.append(sourceID) for k, s in repo_links.items(): size = s - fr, to = k.split('||') + fr, to = k.split("||") if fr in existing_repos and to in existing_repos: doc = { - 'source': fr, - 'target': to, - 'value': max(1, (size/max_shared) * 5), - 'name': "%s ↔ %s" % (fr, to), - 'tooltip': "%u topics exchanged" % size + "source": fr, + "target": to, + "value": max(1, (size / max_shared) * 5), + "name": "%s ↔ %s" % (fr, to), + "tooltip": "%u topics exchanged" % size, } links.append(doc) JSON_OUT = { - 'maxLinks': max_links, - 'maxShared': max_shared, - 'widgetType': { - 'chartType': 'link' # Recommendation for the UI - }, - 'links': links, - 'nodes': nodes, - 'okay': True, - 'responseTime': time.time() - now + "maxLinks": max_links, + "maxShared": max_shared, + "widgetType": {"chartType": "link"}, # Recommendation for the UI + "links": links, + "nodes": nodes, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/mood-timeseries.py b/kibble/api/pages/mail/mood-timeseries.py index 60cc33e3..ff7153f1 100644 --- a/kibble/api/pages/mail/mood-timeseries.py +++ b/kibble/api/pages/mail/mood-timeseries.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the email mood timeseries renderer for Kibble """ @@ -72,109 +68,83 @@ import json import time + def run(API, environ, indata, session): # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - interval = indata.get('interval', 'week') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "week") # Define moods we know of - moods_good = set(['trust', 'joy', 'confident', 'positive']) - moods_bad = set(['sadness', 'anger', 'disgust', 'fear', 'negative']) - moods_neutral = set(['anticipation', 'surprise', 'tentative', 'analytical', 'neutral']) + moods_good = set(["trust", "joy", "confident", "positive"]) + moods_bad = set(["sadness", "anger", "disgust", "fear", "negative"]) + moods_neutral = set( + ["anticipation", "surprise", "tentative", "analytical", "neutral"] + ) all_moods = set(moods_good | moods_bad | moods_neutral) # Fetch all sources for default org - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'ts': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - }, - { 'exists': { - 'field': 'mood' - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"ts": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + {"exists": {"field": "mood"}}, + ] } + } + } # Source-specific or view-specific?? 
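+    # Note the exists filter above: mail that was never mood-analysed is left
+    # out of the averages rather than being counted as zero.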
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - - emls = session.DB.ES.count( - index=session.DB.dbname, - doc_type="email", - body = query - )['count'] - - query['aggs'] = { - 'history': { - 'date_histogram': { - 'field': 'date', - 'interval': interval - }, - 'aggs': { - } - } + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + + emls = session.DB.ES.count(index=session.DB.dbname, doc_type="email", body=query)[ + "count" + ] + + query["aggs"] = { + "history": { + "date_histogram": {"field": "date", "interval": interval}, + "aggs": {}, + } } # Add aggregations for moods for mood in all_moods: - query['aggs']['history']['aggs'][mood] = { - 'sum': { - 'field': "mood.%s" % mood - } - } - + query["aggs"]["history"]["aggs"][mood] = {"sum": {"field": "mood.%s" % mood}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="email", size=0, body=query + ) timeseries = [] - - for tz in res['aggregations']['history']['buckets']: + for tz in res["aggregations"]["history"]["buckets"]: moods = {} - emls = tz['doc_count'] + emls = tz["doc_count"] for mood in all_moods: - moods[mood] = int (100 * tz.get(mood, {'value':0})['value'] / max(1, emls)) - moods['date'] = int(tz['key']/1000) + moods[mood] = int(100 * tz.get(mood, {"value": 0})["value"] / max(1, emls)) + moods["date"] = int(tz["key"] / 1000) timeseries.append(moods) - JSON_OUT = { - 'timeseries': timeseries, - 'okay': True - } + JSON_OUT = {"timeseries": timeseries, "okay": True} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/mood.py b/kibble/api/pages/mail/mood.py index ae51fb19..cdd9b1d8 100644 --- a/kibble/api/pages/mail/mood.py +++ b/kibble/api/pages/mail/mood.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the email mood renderer for Kibble """ @@ -72,75 +68,56 @@ import json import time + def run(API, environ, indata, session): # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span # Define moods we know of - moods_good = set(['trust', 'joy', 'confident', 'positive']) - moods_bad = set(['sadness', 'anger', 'disgust', 'fear', 'negative']) - moods_neutral = set(['anticipation', 'surprise', 'tentative', 'analytical', 'neutral']) + moods_good = set(["trust", "joy", "confident", "positive"]) + moods_bad = set(["sadness", "anger", "disgust", "fear", "negative"]) + moods_neutral = set( + ["anticipation", "surprise", "tentative", "analytical", "neutral"] + ) all_moods = set(moods_good | moods_bad | moods_neutral) # Start off with a query for the entire org (we want to compare) - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'ts': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - }, - { 'exists': { - 'field': 'mood' - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"ts": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + {"exists": {"field": "mood"}}, + ] } + } + } # Count all emails, for averaging scores - gemls = session.DB.ES.count( - index=session.DB.dbname, - doc_type="email", - body = query - )['count'] + gemls = session.DB.ES.count(index=session.DB.dbname, doc_type="email", body=query)[ + "count" + ] # Add aggregations for moods - query['aggs'] = { - - } + query["aggs"] = {} for mood in all_moods: - query['aggs'][mood] = { - 'sum': { - 'field': "mood.%s" % mood - } - } - + query["aggs"][mood] = {"sum": {"field": "mood.%s" % mood}} global_mood_compiled = {} mood_compiled = {} @@ -148,74 +125,72 @@ def run(API, environ, indata, session): gtxt = "This shows the overall estimated mood as a gauge from terrible to good." # If we're comparing against all lists, first do a global query # and compile moods overall - if indata.get('relative'): + if indata.get("relative"): txt = "This chart shows the ten potential mood types on the selected lists as they compare against all mailing lists in the database. A score of 100 here means the sentiment conforms to averages across all lists." gtxt = "This shows the overall estimated mood compared to all lists, as a gauge from terrible to good." 
         global_moods = {}
         gres = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="email",
-            size = 0,
-            body = query
-        )
-        for mood, el in gres['aggregations'].items():
+            index=session.DB.dbname, doc_type="email", size=0, body=query
+        )
+        for mood, el in gres["aggregations"].items():
             # If a mood is not present (iow sum is 0), remove it from the equation by setting to -1
-            if el['value'] == 0:
-                el['value'] == -1
-            global_moods[mood] = el['value']
+            if el["value"] == 0:
+                el["value"] = -1
+            global_moods[mood] = el["value"]
         for k, v in global_moods.items():
             if v >= 0:
-                global_mood_compiled[k] = int( (v / max(1,gemls)) * 100)
+                global_mood_compiled[k] = int((v / max(1, gemls)) * 100)

     # Now, if we have a view (or not distinguishing), ...
     ss = False
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
         ss = True
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
         ss = True

     # If we have a view enabled (and distinguish), compile local view against global view
     # Else, just copy global as local
-    if ss or not indata.get('relative'):
+    if ss or not indata.get("relative"):
         res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="email",
-            size = 0,
-            body = query
-        )
+            index=session.DB.dbname, doc_type="email", size=0, body=query
+        )

-        del query['aggs'] # we have to remove these to do a count()
+        del query["aggs"]  # we have to remove these to do a count()
         emls = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="email",
-            body = query
-        )['count']
+            index=session.DB.dbname, doc_type="email", body=query
+        )["count"]
         moods = {}
         years = 0
-        for mood, el in res['aggregations'].items():
-            if el['value'] == 0:
-                el['value'] == -1
-            moods[mood] = el['value']
+        for mood, el in res["aggregations"].items():
+            if el["value"] == 0:
+                el["value"] = -1
+            moods[mood] = el["value"]
         for k, v in moods.items():
             if v > 0:
-                mood_compiled[k] = int(100 * int( ( v / max(1,emls)) * 100) / max(1, global_mood_compiled.get(k, 100)))
+                mood_compiled[k] = int(
+                    100
+                    * int((v / max(1, emls)) * 100)
+                    / max(1, global_mood_compiled.get(k, 100))
+                )
     else:
         mood_compiled = global_mood_compiled

     # If relative mode and a field is missing, assume 100 (norm)
-    if indata.get('relative'):
+    if indata.get("relative"):
         for M in all_moods:
             if mood_compiled.get(M, 0) == 0:
                 mood_compiled[M] = 100

     # Compile an overall happiness level
-    MAX = max(max(mood_compiled.values()),1)
-    X = 100 if indata.get('relative') else 0
+    MAX = max(max(mood_compiled.values()), 1)
+    X = 100 if indata.get("relative") else 0
     bads = X
     for B in moods_bad:
         if mood_compiled.get(B) and mood_compiled[B] > X:
@@ -229,21 +204,17 @@ def run(API, environ, indata, session):
             goods += mood_compiled[B]
     MAX = max(MAX, bads, goods)
     if bads > 0:
-        happ -= (50*bads/MAX)
+        happ -= 50 * bads / MAX
     if goods > 0:
-        happ += (50*goods/MAX)
+        happ += 50 * goods / MAX
     swingometer = max(0, min(100, happ))

     # JSON out!
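+    # NB: relativeMode is reported as True whether or not the relative flag
+    # was set; only the counts and the gauge text actually change.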
JSON_OUT = { - 'relativeMode': True, - 'text': txt, - 'counts': mood_compiled, - 'okay': True, - 'gauge': { - 'key': 'Happiness', - 'value': swingometer, - 'text': gtxt - } + "relativeMode": True, + "text": txt, + "counts": mood_compiled, + "okay": True, + "gauge": {"key": "Happiness", "value": swingometer, "text": gtxt}, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/pony-timeseries.py b/kibble/api/pages/mail/pony-timeseries.py index 033d7997..578ae70c 100644 --- a/kibble/api/pages/mail/pony-timeseries.py +++ b/kibble/api/pages/mail/pony-timeseries.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the pony factor renderer for Kibble """ @@ -75,6 +71,7 @@ import datetime import dateutil.relativedelta + def run(API, environ, indata, session): # We need to be logged in for this! @@ -85,12 +82,12 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - hl = indata.get('span', 24) + hl = indata.get("span", 24) tnow = datetime.date.today() nm = tnow.month - (tnow.month % 3) ny = tnow.year @@ -110,101 +107,73 @@ def run(API, environ, indata, session): nm += 12 ny = ny - 1 - #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'ts': { - 'from': tf, - 'to': t - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ], - 'must_not': [ - { - 'match': { - 'sourceURL': 'commits*' - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"ts": {"from": tf, "to": t}}}, + {"term": {"organisation": dOrg}}, + ], + "must_not": [{"match": {"sourceURL": "commits*"}}], } + } + } # Source-specific or view-specific?? 
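+    # The must_not clause above drops sources whose sourceURL matches
+    # 'commits*', keeping automated commit mail out of the pony factor sample.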
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get an initial count of commits - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="email", - body = query - ) + res = session.DB.ES.count(index=session.DB.dbname, doc_type="email", body=query) - globcount = res['count'] + globcount = res["count"] if globcount == 0: break # Get top 25 committers this period - query['aggs'] = { - 'by_sender': { - 'terms': { - 'field': 'sender', - 'size': 2500 - } - } - } + query["aggs"] = {"by_sender": {"terms": {"field": "sender", "size": 2500}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = query - ) - + index=session.DB.dbname, doc_type="email", size=0, body=query + ) # PF for authors pf_author = 0 pf_author_count = 0 cpf = {} - for bucket in res['aggregations']['by_sender']['buckets']: - count = bucket['doc_count'] + for bucket in res["aggregations"]["by_sender"]["buckets"]: + count = bucket["doc_count"] # Assume anyone sending > 10 emails per day is a bot (or a commit list)! - if count > (10*365*hl): + if count > (10 * 365 * hl): globcount -= count continue pf_author += 1 pf_author_count += count - if '@' in bucket['key']: - mldom = bucket['key'].lower().split('@')[-1] + if "@" in bucket["key"]: + mldom = bucket["key"].lower().split("@")[-1] cpf[mldom] = True - if pf_author_count > int(globcount/2): + if pf_author_count > int(globcount / 2): break - ts.append({ - 'date': t, - 'Pony Factor (authors)': pf_author, - 'Meta-Pony Factor': len(cpf) - }) + ts.append( + { + "date": t, + "Pony Factor (authors)": pf_author, + "Meta-Pony Factor": len(cpf), + } + ) - ts = sorted(ts, key = lambda x: x['date']) + ts = sorted(ts, key=lambda x: x["date"]) JSON_OUT = { - 'text': "This shows Pony Factors as calculated over a %u month timespan. Authorship is a measure of the people it takes to make up the bulk of email traffic, and meta-pony is an estimation of how many organisations/companies are involved." % hl, - 'timeseries': ts, - 'okay': True, - 'responseTime': time.time() - now, + "text": "This shows Pony Factors as calculated over a %u month timespan. Authorship is a measure of the people it takes to make up the bulk of email traffic, and meta-pony is an estimation of how many organisations/companies are involved." + % hl, + "timeseries": ts, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/relationships.py b/kibble/api/pages/mail/relationships.py index 1633a25c..69e3df4a 100644 --- a/kibble/api/pages/mail/relationships.py +++ b/kibble/api/pages/mail/relationships.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the committer relationship list renderer for Kibble """ @@ -76,6 +72,7 @@ import re import math + def run(API, environ, indata, session): # We need to be logged in for this! 
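The pony-factor loop in the pony-timeseries.py hunks above boils down to one rule: walk the senders from most to least active and count how many it takes to cover more than half of all traffic. A minimal sketch of that core, not part of the patch, with hypothetical names (pony_factor, counts):

    def pony_factor(counts):
        # counts: {sender: number_of_emails}, bot-like senders already removed
        total = sum(counts.values())
        covered = 0
        people = 0
        for n in sorted(counts.values(), reverse=True):
            people += 1
            covered += n
            if covered > total // 2:
                break
        return people

The meta-pony factor reported next to it is the same walk, counting distinct sender domains instead of individual senders.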
@@ -86,62 +83,44 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'ts': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"ts": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'sender': indata.get('email')}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append({"term": {"sender": indata.get("email")}}) # Get number of commits, this period, per repo - query['aggs'] = { - 'per_ml': { - 'terms': { - 'field': 'sourceID', - 'size': 10000 - } - } - } + query["aggs"] = {"per_ml": {"terms": {"field": "sourceID", "size": 10000}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="email", size=0, body=query + ) repos = {} repo_commits = {} @@ -150,33 +129,23 @@ def run(API, environ, indata, session): max_links = 0 max_shared = 0 max_authors = 0 - minLinks = indata.get('links', 1) + minLinks = indata.get("links", 1) # For each repo, count commits and gather data on authors - for doc in res['aggregations']['per_ml']['buckets']: - sourceID = doc['key'] - emails = doc['doc_count'] + for doc in res["aggregations"]["per_ml"]["buckets"]: + sourceID = doc["key"] + emails = doc["doc_count"] # Gather the unique authors/committers - query['aggs'] = { - 'per_ml': { - 'terms': { - 'field': 'sender', - 'size': 10000 - } - } - } + query["aggs"] = {"per_ml": {"terms": {"field": "sender", "size": 10000}}} xquery = copy.deepcopy(query) - xquery['query']['bool']['must'].append({'term': {'sourceID': sourceID}}) + xquery["query"]["bool"]["must"].append({"term": {"sourceID": sourceID}}) xres = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = xquery + index=session.DB.dbname, doc_type="email", size=0, body=xquery ) authors = [] - for person in xres['aggregations']['per_ml']['buckets']: - authors.append(person['key']) + for person in 
xres["aggregations"]["per_ml"]["buckets"]: + authors.append(person["key"]) if emails > max_emails: max_emails = emails repos[sourceID] = authors @@ -191,9 +160,11 @@ def run(API, environ, indata, session): # Grab data of all sources for ID, repo in repos.items(): mylinks = {} - if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID): + if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id=ID): continue - repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID) + repodatas[ID] = session.DB.ES.get( + index=session.DB.dbname, doc_type="source", id=ID + ) for ID, repo in repos.items(): mylinks = {} @@ -201,49 +172,59 @@ def run(API, environ, indata, session): continue repodata = repodatas[ID] oID = ID - if indata.get('collapse'): - m = re.search(indata.get('collapse'), repodata['_source']['sourceURL']) + if indata.get("collapse"): + m = re.search(indata.get("collapse"), repodata["_source"]["sourceURL"]) if m: ID = m.group(1) else: - ID = re.sub(r"^.+/(?:list\.html\?)?", "", repodata['_source']['sourceURL']) + ID = re.sub(r"^.+/(?:list\.html\?)?", "", repodata["_source"]["sourceURL"]) for xID, xrepo in repos.items(): if xID in repodatas: xrepodata = repodatas[xID] - if indata.get('collapse'): - m = re.search(indata.get('collapse'), xrepodata['_source']['sourceURL']) + if indata.get("collapse"): + m = re.search( + indata.get("collapse"), xrepodata["_source"]["sourceURL"] + ) if m: xID = m.group(1) else: - xID = re.sub(r"^.+/(?:list\.html\?)?", "", xrepodata['_source']['sourceURL']) + xID = re.sub( + r"^.+/(?:list\.html\?)?", "", xrepodata["_source"]["sourceURL"] + ) if xID != ID: xlinks = [] for author in xrepo: if author in repo: xlinks.append(author) - lname = "%s||%s" % (ID, xID) # Link name - rname = "%s||%s" % (xID, ID) # Reverse link name + lname = "%s||%s" % (ID, xID) # Link name + rname = "%s||%s" % (xID, ID) # Reverse link name if len(xlinks) >= minLinks and not rname in repo_links: mylinks[xID] = len(xlinks) - repo_links[lname] = repo_links.get(lname, 0) + len(xlinks) # How many contributors in common between project A and B? + repo_links[lname] = repo_links.get(lname, 0) + len( + xlinks + ) # How many contributors in common between project A and B? if repo_links[lname] > max_shared: max_shared = repo_links[lname] if ID not in repo_notoriety: repo_notoriety[ID] = set() - repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to? + repo_notoriety[ID].update( + mylinks.keys() + ) # How many projects is this repo connected to? if ID not in repo_authors: repo_authors[ID] = set() - repo_authors[ID].update(repo) # How many projects is this repo connected to? + repo_authors[ID].update(repo) # How many projects is this repo connected to? if ID != oID: repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID] if repo_commits[ID] > max_emails: - max_emails = repo_commits[ID] # Used for calculating max link thickness + max_emails = repo_commits[ID] # Used for calculating max link thickness if len(repo_notoriety[ID]) > max_links: max_links = len(repo_notoriety[ID]) if len(repo_authors[ID]) > max_authors: - max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts + max_authors = len( + repo_authors[ID] + ) # Used for calculating max sphere size in charts # Now, pull it all together! 
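+    # Everything below is presentation: the accumulated repo_* maps become the
+    # node and link documents for the UI's 'link' chart.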
nodes = [] @@ -252,44 +233,43 @@ def run(API, environ, indata, session): for sourceID in repo_notoriety.keys(): lsize = 0 for k in repo_links.keys(): - fr, to = k.split('||') + fr, to = k.split("||") if fr == sourceID or to == sourceID: lsize += 1 asize = len(repo_authors[sourceID]) doc = { - 'id': sourceID, - 'name': sourceID, - 'emails': repo_commits[sourceID], - 'authors': asize, - 'links': lsize, - 'size': max(5, (1 - abs(math.log10(asize / max_authors))) * 45), - 'tooltip': "%u connections, %u contributors, %u emails" % (lsize, asize, repo_commits[sourceID]) + "id": sourceID, + "name": sourceID, + "emails": repo_commits[sourceID], + "authors": asize, + "links": lsize, + "size": max(5, (1 - abs(math.log10(asize / max_authors))) * 45), + "tooltip": "%u connections, %u contributors, %u emails" + % (lsize, asize, repo_commits[sourceID]), } nodes.append(doc) existing_repos.append(sourceID) for k, s in repo_links.items(): size = s - fr, to = k.split('||') + fr, to = k.split("||") if fr in existing_repos and to in existing_repos: doc = { - 'source': fr, - 'target': to, - 'value': max(1, (size/max_shared) * 8), - 'name': "%s ↔ %s" % (fr, to), - 'tooltip': "%u contributors in common" % size + "source": fr, + "target": to, + "value": max(1, (size / max_shared) * 8), + "name": "%s ↔ %s" % (fr, to), + "tooltip": "%u contributors in common" % size, } links.append(doc) JSON_OUT = { - 'maxLinks': max_links, - 'maxShared': max_shared, - 'widgetType': { - 'chartType': 'link' # Recommendation for the UI - }, - 'links': links, - 'nodes': nodes, - 'okay': True, - 'responseTime': time.time() - now + "maxLinks": max_links, + "maxShared": max_shared, + "widgetType": {"chartType": "link"}, # Recommendation for the UI + "links": links, + "nodes": nodes, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/retention.py b/kibble/api/pages/mail/retention.py index aa4f03e5..dd54a7b3 100644 --- a/kibble/api/pages/mail/retention.py +++ b/kibble/api/pages/mail/retention.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -64,9 +63,6 @@ ######################################################################## - - - """ This is the code contributor retention factor renderer for Kibble """ @@ -76,6 +72,7 @@ import re import datetime + def run(API, environ, indata, session): # We need to be logged in for this! 
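Both graph renderers in this patch share one sizing rule for chart nodes: mail/map.py feeds it reply counts, mail/relationships.py author counts. A value equal to the observed maximum gets the full 45 units, and a tenth of the maximum already collapses to the floor of 5. A sketch of just that rule, assuming value and max_value are positive with value <= max_value:

    import math

    def node_size(value, max_value):
        # value == max_value       -> 45.0
        # value == max_value / 10  -> 0.0, floored to 5
        # anything smaller         -> negative, floored to 5
        return max(5, (1 - abs(math.log10(value / max_value))) * 45)

With value capped at max_value the abs() is effectively a no-op; everything below a tenth of the maximum simply lands on the floor.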
@@ -86,13 +83,14 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - hl = indata.get('span', 12) # By default, we define a contributor as active if having committer in the past year + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + + hl = indata.get( + "span", 12 + ) # By default, we define a contributor as active if having committer in the past year tnow = datetime.date.today() nm = tnow.month - (tnow.month % 3) ny = tnow.year @@ -109,7 +107,7 @@ def run(API, environ, indata, session): ny = 1970 FoundSomething = False - while ny < cy or (ny == cy and (nm+3) <= tnow.month): + while ny < cy or (ny == cy and (nm + 3) <= tnow.month): d = datetime.date(ny, nm, 1) t = time.mktime(d.timetuple()) nm += 3 @@ -121,72 +119,47 @@ def run(API, environ, indata, session): d = datetime.date(ny, nm, 1) tf = time.mktime(d.timetuple()) - #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'ts': { - 'from': t, - 'to': tf - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"ts": {"from": t, "to": tf}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get an initial count of commits - res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="email", - body = query - ) + res = session.DB.ES.count(index=session.DB.dbname, doc_type="email", body=query) - globcount = res['count'] + globcount = res["count"] if globcount == 0 and not FoundSomething: continue FoundSomething = True # Get top 1000 committers this period - query['aggs'] = { - 'by_author': { - 'terms': { - 'field': 'sender', - 'size': 200000 - } - } - } + query["aggs"] = {"by_author": {"terms": {"field": "sender", "size": 200000}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = query - ) - + index=session.DB.dbname, doc_type="email", size=0, body=query + ) retained = 0 added = 0 lost = 0 thisPeriod = [] - for bucket in res['aggregations']['by_author']['buckets']: - who = bucket['key'] + for bucket in res["aggregations"]["by_author"]["buckets"]: + who = bucket["key"] thisPeriod.append(who) if who not in peopleSeen: peopleSeen[who] = tf @@ -197,7 +170,7 @@ def run(API, environ, indata, session): prune = [] for k, v in activePeople.items(): - if v < (t - (hl*30.45*86400)): + if v < (t - (hl * 30.45 * 86400)): prune.append(k) lost += 1 @@ -206,46 +179,49 @@ def run(API, environ, indata, session): del peopleSeen[who] retained = len(activePeople) - added - ts.append({ - 'date': tf, - 'People who (re)joined': added, - 'People who quit': lost, - 'People retained': retained, - 'Active people': added + retained - }) + ts.append( + { + "date": tf, + "People who (re)joined": added, + "People who quit": lost, + "People retained": retained, + "Active people": added + retained, + } + ) groups = [ - ['More than 5 years', (5*365*86400)+1], - ['2 - 5 years', (2*365*86400)+1], - ['1 - 2 years', (365*86400)], - ['Less than a year', 1] + ["More than 5 years", (5 * 365 * 86400) + 1], + ["2 - 5 years", (2 * 365 * 86400) + 1], + ["1 - 2 years", (365 * 86400)], + ["Less than a year", 1], ] counts = {} totExp = 0 for person, age in activePeople.items(): totExp += time.time() - allPeople[person] - for el in sorted(groups, key = lambda x: x[1], reverse = True): + for el in sorted(groups, key=lambda x: x[1], reverse=True): if allPeople[person] <= time.time() - el[1]: counts[el[0]] = counts.get(el[0], 0) + 1 break - avgyr = (totExp / (86400*365)) / max(len(activePeople),1) + avgyr = (totExp / (86400 * 365)) / max(len(activePeople), 1) - ts = sorted(ts, key = lambda x: x['date']) + ts = sorted(ts, key=lambda x: x["date"]) avgm = "" yr = int(avgyr) - ym = round((avgyr-yr)*12) + ym = round((avgyr - yr) * 12) if yr >= 1: avgm += "%u year%s" % (yr, "s" if yr != 1 else "") if ym > 0: avgm += "%s%u month%s" % (", " if yr > 0 else "", ym, "s" if ym != 1 else "") JSON_OUT = { - 'text': "This shows Contributor retention as calculated over a %u month timespan. The average experience of currently active people is %s." % (hl, avgm), - 'timeseries': ts, - 'counts': counts, - 'averageYears': avgyr, - 'okay': True, - 'responseTime': time.time() - now, + "text": "This shows Contributor retention as calculated over a %u month timespan. 
The average experience of currently active people is %s." + % (hl, avgm), + "timeseries": ts, + "counts": counts, + "averageYears": avgyr, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/timeseries-single.py b/kibble/api/pages/mail/timeseries-single.py index 82580aa9..d615715c 100644 --- a/kibble/api/pages/mail/timeseries-single.py +++ b/kibble/api/pages/mail/timeseries-single.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the email-only timeseries renderer for Kibble unlike timeseries.py, this only shows mail sent, not topics or authors. @@ -74,6 +70,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! @@ -84,82 +81,60 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - interval = indata.get('interval', 'month') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'ts': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"ts": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
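The retention.py hunks above classify each still-active person by how long ago they first appeared: the group table is scanned longest-first and the first threshold the person's age clears wins. A compact sketch of that classification (tenure_bucket is a hypothetical name; the thresholds are copied from the patch):

    GROUPS = [
        ("More than 5 years", 5 * 365 * 86400 + 1),
        ("2 - 5 years", 2 * 365 * 86400 + 1),
        ("1 - 2 years", 365 * 86400),
        ("Less than a year", 1),
    ]

    def tenure_bucket(first_seen, now):
        # GROUPS is already ordered longest-first, so the first hit wins
        for label, min_age in GROUPS:
            if first_seen <= now - min_age:
                return label
        return "Less than a year"

The average-experience figure is built the same way: total seconds since first sighting across all active people, divided by a year, divided by the head count.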
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'sender': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [{"term": {"sender": indata.get("email")}}] + query["query"]["bool"]["minimum_should_match"] = 1 # Get number of committers, this period - query['aggs'] = { - 'timeseries': { - 'date_histogram': { - 'field': 'date', - 'interval': interval - } - } - } + query["aggs"] = { + "timeseries": {"date_histogram": {"field": "date", "interval": interval}} + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="email", size=0, body=query + ) timeseries = [] - for bucket in res['aggregations']['timeseries']['buckets']: - ts = int(bucket['key'] / 1000) - timeseries.append({ - 'date': ts, - 'emails': bucket['doc_count'] - }) + for bucket in res["aggregations"]["timeseries"]["buckets"]: + ts = int(bucket["key"] / 1000) + timeseries.append({"date": ts, "emails": bucket["doc_count"]}) JSON_OUT = { - 'widgetType': { - 'chartType': 'bar' # Recommendation for the UI - }, - 'timeseries': timeseries, - 'interval': interval, - 'okay': True, - 'responseTime': time.time() - now + "widgetType": {"chartType": "bar"}, # Recommendation for the UI + "timeseries": timeseries, + "interval": interval, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/timeseries.py b/kibble/api/pages/mail/timeseries.py index 5072de4d..639d728d 100644 --- a/kibble/api/pages/mail/timeseries.py +++ b/kibble/api/pages/mail/timeseries.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the email timeseries renderer for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! 
@@ -83,107 +80,91 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span - which = 'committer_email' - role = 'committer' - if indata.get('author', False): - which = 'author_email' - role = 'author' - - interval = indata.get('interval', 'month') + which = "committer_email" + role = "committer" + if indata.get("author", False): + which = "author_email" + role = "author" + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'date': { - 'from': time.strftime("%Y/%m/%d 00:00:00", time.gmtime(dateFrom)), - 'to': time.strftime("%Y/%m/%d 23:59:59", time.gmtime(dateTo)) - } - } - }, - { - 'term': { - 'organisation': dOrg - } + "query": { + "bool": { + "must": [ + { + "range": { + "date": { + "from": time.strftime( + "%Y/%m/%d 00:00:00", time.gmtime(dateFrom) + ), + "to": time.strftime( + "%Y/%m/%d 23:59:59", time.gmtime(dateTo) + ), } - ] - } - } + } + }, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['should'] = [{'term': {'sender': indata.get('email')}}] - query['query']['bool']['minimum_should_match'] = 1 + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["should"] = [{"term": {"sender": indata.get("email")}}] + query["query"]["bool"]["minimum_should_match"] = 1 # Get number of committers, this period - query['aggs'] = { - 'timeseries': { - 'date_histogram': { - 'field': 'date', - 'interval': interval - }, - 'aggs': { - 'email': { - 'sum': { - 'field': 'emails' - } - }, - 'topics': { - 'sum': { - 'field': 'topics' - } - }, - 'authors': { - 'sum': { - 'field': 'authors' - } - } - } - } + query["aggs"] = { + "timeseries": { + "date_histogram": {"field": "date", "interval": interval}, + "aggs": { + "email": {"sum": {"field": "emails"}}, + "topics": {"sum": {"field": "topics"}}, + "authors": {"sum": {"field": "authors"}}, + }, } + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="mailstats", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="mailstats", size=0, body=query + ) timeseries = [] - for bucket in res['aggregations']['timeseries']['buckets']: - ts = int(bucket['key'] / 1000) - timeseries.append({ - 'date': ts, - 'emails': bucket['email']['value'], - 'topics': bucket['topics']['value'], - 'authors': bucket['authors']['value'] - }) + for bucket in res["aggregations"]["timeseries"]["buckets"]: + ts = int(bucket["key"] / 1000) + timeseries.append( + { + "date": ts, + "emails": bucket["email"]["value"], + "topics": bucket["topics"]["value"], + "authors": bucket["authors"]["value"], + } + ) JSON_OUT = { - 'widgetType': { - 'chartType': 'bar' # Recommendation for the UI - }, - 'timeseries': timeseries, - 'interval': interval, - 'okay': True, - 'responseTime': time.time() - now + "widgetType": {"chartType": "bar"}, # Recommendation for the UI + "timeseries": timeseries, + "interval": interval, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/top-authors.py b/kibble/api/pages/mail/top-authors.py index 4f94e0f6..d2d14c1f 100644 --- a/kibble/api/pages/mail/top-authors.py +++ b/kibble/api/pages/mail/top-authors.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN committers list renderer for Kibble """ @@ -76,6 +72,7 @@ ROBITS = r"(git|jira|jenkins|gerrit)@" + def run(API, environ, indata, session): # We need to be logged in for this! 
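mail/timeseries.py above differs from the single-series variant in two ways: it searches the pre-aggregated mailstats documents, whose date field holds formatted strings (hence the strftime bounds instead of epoch seconds), and it nests three sum sub-aggregations under one date_histogram, so a single size=0 search, which returns aggregation buckets and no hits, yields all three series at once. An illustrative bucket from such a response (values invented):

    # {
    #     "key": 1577836800000,        # interval start, epoch milliseconds
    #     "doc_count": 42,             # mailstats docs in the interval
    #     "email":   {"value": 1234.0},
    #     "topics":  {"value": 310.0},
    #     "authors": {"value": 57.0},
    # }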
@@ -86,96 +83,72 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - interval = indata.get('interval', 'month') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'ts': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"ts": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Get top 25 committers this period - query['aggs'] = { - 'authors': { - 'terms': { - 'field': 'sender', - 'size': 30 - } - } - } + query["aggs"] = {"authors": {"terms": {"field": "sender", "size": 30}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = query - ) + index=session.DB.dbname, doc_type="email", size=0, body=query + ) people = {} - for bucket in res['aggregations']['authors']['buckets']: - email = bucket['key'] + for bucket in res["aggregations"]["authors"]["buckets"]: + email = bucket["key"] # By default, we want to see humans, not bots on this list! 
if re.match(ROBITS, email): continue - count = bucket['doc_count'] - sha = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('utf-8') ).hexdigest() - if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha): - pres = session.DB.ES.get( - index=session.DB.dbname, - doc_type="person", - id = sha - ) - person = pres['_source'] - person['name'] = person.get('name', 'unknown') + count = bucket["doc_count"] + sha = hashlib.sha1(("%s%s" % (dOrg, email)).encode("utf-8")).hexdigest() + if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha): + pres = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id=sha) + person = pres["_source"] + person["name"] = person.get("name", "unknown") people[email] = person - people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest() - people[email]['count'] = count + people[email]["gravatar"] = hashlib.md5( + person.get("email", "unknown").encode("utf-8") + ).hexdigest() + people[email]["count"] = count topN = [] for email, person in people.items(): topN.append(person) - topN = sorted(topN, key = lambda x: x['count'], reverse = True) + topN = sorted(topN, key=lambda x: x["count"], reverse=True) JSON_OUT = { - 'topN': { - 'denoter': 'emails', - 'items': topN - }, - 'sorted': people, - 'okay': True, - 'responseTime': time.time() - now + "topN": {"denoter": "emails", "items": topN}, + "sorted": people, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/top-topics.py b/kibble/api/pages/mail/top-topics.py index 90c5a805..7696736e 100644 --- a/kibble/api/pages/mail/top-topics.py +++ b/kibble/api/pages/mail/top-topics.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the TopN committers list renderer for Kibble """ @@ -73,6 +69,7 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! 
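# A self-contained sketch of the three conventions top-authors.py above
# relies on: bot senders are skipped via the ROBITS regex, a person's
# document id is sha1(organisation + email), and the avatar hash is
# md5(email) as expected by gravatar.com. The sample addresses are
# illustrative assumptions.
import hashlib
import re

ROBITS = r"(git|jira|jenkins|gerrit)@"

def person_id(org, email):
    # same id scheme as the endpoint: sha1 over org + email
    return hashlib.sha1(("%s%s" % (org, email)).encode("utf-8")).hexdigest()

def gravatar(email):
    return hashlib.md5(email.encode("utf-8")).hexdigest()

for sender in ("git@apache.org", "jane@example.org"):
    if re.match(ROBITS, sender):
        continue  # re.match anchors at the start, so only bot prefixes match
    print(person_id("apache", sender), gravatar(sender))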
@@ -83,73 +80,57 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) - interval = indata.get('interval', 'month') + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + interval = indata.get("interval", "month") #################################################################### #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'ts': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - }, - 'sort': [{ - 'emails': 'desc' - }] + "query": { + "bool": { + "must": [ + {"range": {"ts": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + }, + "sort": [{"emails": "desc"}], + } # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="mailtop", - size = 25, - body = query - ) + index=session.DB.dbname, doc_type="mailtop", size=25, body=query + ) topN = [] - for bucket in res['hits']['hits']: - topN.append( { - 'source': bucket['_source']['sourceURL'], - 'name': bucket['_source']['subject'], - 'count': bucket['_source']['emails'] - }) + for bucket in res["hits"]["hits"]: + topN.append( + { + "source": bucket["_source"]["sourceURL"], + "name": bucket["_source"]["subject"], + "count": bucket["_source"]["emails"], + } + ) JSON_OUT = { - 'topN': { - 'denoter': 'emails', - 'items': topN, - 'icon': 'envelope' - }, - 'okay': True, - 'responseTime': time.time() - now + "topN": {"denoter": "emails", "items": topN, "icon": "envelope"}, + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/mail/trends.py b/kibble/api/pages/mail/trends.py index 91dcc3be..5348bc36 100644 --- a/kibble/api/pages/mail/trends.py +++ b/kibble/api/pages/mail/trends.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the Email trends renderer for Kibble """ @@ -73,6 +69,7 @@ import time import datetime + def run(API, environ, indata, session): # We need to be logged in for this! 
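# A short sketch of the period arithmetic the trends endpoints below use:
# the prior span is exactly as long as the current one and ends one second
# before it begins (dateYonder --> dateFrom - 1), so the before/after
# numbers compare equal-length windows. Values are illustrative.
import time

date_to = int(time.time())
date_from = date_to - (86400 * 30 * 6)           # default: a 6 month span
date_yonder = date_from - (date_to - date_from)  # equally long prior span

current_span = (date_from, date_to)
prior_span = (date_yonder, date_from - 1)
print(current_span, prior_span)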
@@ -83,20 +80,20 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - - - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) + + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" #################################################################### # We start by doing all the queries for THIS period. # @@ -104,234 +101,189 @@ def run(API, environ, indata, session): # and rerun the same queries. # #################################################################### query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'date': { - 'from': time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(dateFrom)), - 'to': time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(dateTo)) - } - } - }, - { - 'term': { - 'organisation': dOrg - } + "query": { + "bool": { + "must": [ + { + "range": { + "date": { + "from": time.strftime( + "%Y/%m/%d %H:%M:%S", time.localtime(dateFrom) + ), + "to": time.strftime( + "%Y/%m/%d %H:%M:%S", time.localtime(dateTo) + ), } - ] - } - } + } + }, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'sender': indata.get('email')}}) - + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append({"term": {"sender": indata.get("email")}}) # Get number of threads and emails, this period - query['aggs'] = { - 'topics': { - 'sum': { - 'field': 'topics' - } - }, - 'emails': { - 'sum': { - 'field': 'emails' - } - } - } + query["aggs"] = { + "topics": {"sum": {"field": "topics"}}, + "emails": {"sum": {"field": "emails"}}, + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="mailstats", - size = 0, - body = query - ) - no_topics = res['aggregations']['topics']['value'] - no_emails = res['aggregations']['emails']['value'] - + index=session.DB.dbname, doc_type="mailstats", size=0, body=query + ) + no_topics = res["aggregations"]["topics"]["value"] + no_emails = res["aggregations"]["emails"]["value"] # Authors query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'date': { - 'from': time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(dateFrom)), - 'to': time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(dateTo)) - } - } - }, - { - 'term': { - 'organisation': dOrg - } + "query": { + "bool": { + "must": [ + { + "range": { + "date": { + "from": time.strftime( + "%Y/%m/%d %H:%M:%S", time.localtime(dateFrom) + ), + "to": time.strftime( + "%Y/%m/%d %H:%M:%S", time.localtime(dateTo) + ), } - ] - } - } + } + }, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'sender': indata.get('email')}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append({"term": {"sender": indata.get("email")}}) # Get number of authors, this period - query['aggs'] = { - 'authors': { - 'cardinality': { - 'field': 'sender' - } - } - } + query["aggs"] = {"authors": {"cardinality": {"field": "sender"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = query - ) - no_authors = res['aggregations']['authors']['value'] - - + index=session.DB.dbname, doc_type="email", size=0, body=query + ) + no_authors = res["aggregations"]["authors"]["value"] #################################################################### # Change to PRIOR SPAN # #################################################################### query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'date': { - 'from': time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(dateYonder)), - 'to': time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(dateFrom-1)) - } - } - }, - { - 'term': { - 'organisation': dOrg - } + "query": { + "bool": { + "must": [ + { + "range": { + "date": { + "from": time.strftime( + "%Y/%m/%d %H:%M:%S", time.localtime(dateYonder) + ), + "to": time.strftime( + "%Y/%m/%d %H:%M:%S", time.localtime(dateFrom - 1) + ), } - ] - } - } + } + }, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'sender': indata.get('email')}}) - + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append({"term": {"sender": indata.get("email")}}) # Get number of threads and emails, this period - query['aggs'] = { - 'topics': { - 'sum': { - 'field': 'topics' - } - }, - 'emails': { - 'sum': { - 'field': 'emails' - } - } - } + query["aggs"] = { + "topics": {"sum": {"field": "topics"}}, + "emails": {"sum": {"field": "emails"}}, + } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="mailstats", - size = 0, - body = query - ) - no_topics_before = res['aggregations']['topics']['value'] - no_emails_before = res['aggregations']['emails']['value'] - + index=session.DB.dbname, doc_type="mailstats", size=0, body=query + ) + no_topics_before = res["aggregations"]["topics"]["value"] + no_emails_before = res["aggregations"]["emails"]["value"] # Authors query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'date': { - 'from': time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(dateYonder)), - 'to': time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(dateFrom-1)) - } - } - }, - { - 'term': { - 'organisation': dOrg - } + "query": { + "bool": { + "must": [ + { + "range": { + "date": { + "from": time.strftime( + "%Y/%m/%d %H:%M:%S", time.localtime(dateYonder) + ), + "to": time.strftime( + "%Y/%m/%d %H:%M:%S", time.localtime(dateFrom - 1) + ), } - ] - } - } + } + }, + {"term": {"organisation": dOrg}}, + ] } + } + } # Source-specific or view-specific?? 
- if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) - if indata.get('email'): - query['query']['bool']['must'].append({'term': {'sender': indata.get('email')}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) + if indata.get("email"): + query["query"]["bool"]["must"].append({"term": {"sender": indata.get("email")}}) # Get number of authors, this period - query['aggs'] = { - 'authors': { - 'cardinality': { - 'field': 'sender' - } - } - } + query["aggs"] = {"authors": {"cardinality": {"field": "sender"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="email", - size = 0, - body = query - ) - no_authors_before = res['aggregations']['authors']['value'] - - - + index=session.DB.dbname, doc_type="email", size=0, body=query + ) + no_authors_before = res["aggregations"]["authors"]["value"] trends = { "authors": { - 'before': no_authors_before, - 'after': no_authors, - 'title': "People sending email this period" + "before": no_authors_before, + "after": no_authors, + "title": "People sending email this period", }, "topics": { - 'before': no_topics_before, - 'after': no_topics, - 'title': "Topics discussed this period" + "before": no_topics_before, + "after": no_topics, + "title": "Topics discussed this period", }, "email": { - 'before': no_emails_before, - 'after': no_emails, - 'title': "Emails sent this period" - } + "before": no_emails_before, + "after": no_emails, + "title": "Emails sent this period", + }, } - JSON_OUT = { - 'trends': trends, - 'okay': True, - 'responseTime': time.time() - now - } + JSON_OUT = {"trends": trends, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/org/contributors.py b/kibble/api/pages/org/contributors.py index aec3056d..632bc71f 100644 --- a/kibble/api/pages/org/contributors.py +++ b/kibble/api/pages/org/contributors.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -45,9 +44,6 @@ ######################################################################## - - - """ This is the contributor list renderer for Kibble """ @@ -56,7 +52,8 @@ import time import hashlib -cached_people = {} # Store people we know, so we don't have to fetch them again. +cached_people = {} # Store people we know, so we don't have to fetch them again. + def run(API, environ, indata, session): @@ -64,107 +61,90 @@ def run(API, environ, indata, session): if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) # Fetch all contributors for the org - dOrg = session.user['defaultOrganisation'] or "apache" - query = { - 'query': { - 'bool': { - 'must': [ - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } - } + dOrg = session.user["defaultOrganisation"] or "apache" + query = {"query": {"bool": {"must": [{"term": {"organisation": dOrg}}]}}} # Source-specific or view-specific?? - if indata.get('source'): - query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}}) + if indata.get("source"): + query["query"]["bool"]["must"].append( + {"term": {"sourceID": indata.get("source")}} + ) elif viewList: - query['query']['bool']['must'].append({'terms': {'sourceID': viewList}}) + query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}}) # Date specific? - dateTo = indata.get('to', int(time.time())) - dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span - query['query']['bool']['must'].append( - {'range': - { - 'ts': { - 'from': dateFrom, - 'to': dateTo - } - } - } - ) + dateTo = indata.get("to", int(time.time())) + dateFrom = indata.get( + "from", dateTo - (86400 * 30 * 6) + ) # Default to a 6 month span + query["query"]["bool"]["must"].append( + {"range": {"ts": {"from": dateFrom, "to": dateTo}}} + ) emails = [] contribs = {} - for field in ['sender', 'author_email', 'issueCreator', 'issueCloser']: + for field in ["sender", "author_email", "issueCreator", "issueCloser"]: N = 0 while N < 5: - query['aggs'] = { - 'by_id': { - 'terms': { - 'field': field, - 'size': 10000, - 'include': { - 'partition': N, - 'num_partitions': 5 - }, + query["aggs"] = { + "by_id": { + "terms": { + "field": field, + "size": 10000, + "include": {"partition": N, "num_partitions": 5}, } } } res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="*,-*_code_commit,-*_file_history", - size = 0, - body = query - ) + index=session.DB.dbname, + doc_type="*,-*_code_commit,-*_file_history", + size=0, + body=query, + ) # Break if we've found nothing more - #if len(res['aggregations']['by_id']['buckets']) == 0: - #break + # if len(res['aggregations']['by_id']['buckets']) == 0: + # break # otherwise, add 'em to the pile - for k in res['aggregations']['by_id']['buckets']: - if k['key'] not in emails: - emails.append(k['key']) - contribs[k['key']] = contribs.get(k['key'], 0) + k['doc_count'] + for k in res["aggregations"]["by_id"]["buckets"]: + if k["key"] not in emails: + emails.append(k["key"]) + contribs[k["key"]] = contribs.get(k["key"], 0) + k["doc_count"] N += 1 people = [] for email in emails: - pid = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('ascii', errors='replace')).hexdigest() + pid = hashlib.sha1( + ("%s%s" % (dOrg, email)).encode("ascii", errors="replace") + ).hexdigest() person = None if pid in cached_people: person = cached_people[pid] else: try: - doc = session.DB.ES.get(index=session.DB.dbname, doc_type = 'person', id = pid) + doc = session.DB.ES.get( + index=session.DB.dbname, doc_type="person", id=pid + ) cached_people[pid] = { - 'name': doc['_source']['name'], - 'email': doc['_source']['email'], - 'gravatar': 
hashlib.md5( email.encode('ascii', errors = 'replace')).hexdigest() + "name": doc["_source"]["name"], + "email": doc["_source"]["email"], + "gravatar": hashlib.md5( + email.encode("ascii", errors="replace") + ).hexdigest(), } person = cached_people[pid] except: - pass # Couldn't find 'em, booo + pass # Couldn't find 'em, booo if person: - person['contributions'] = contribs.get(email, 0) + person["contributions"] = contribs.get(email, 0) people.append(person) - JSON_OUT = { - 'people': people, - 'okay': True - } + JSON_OUT = {"people": people, "okay": True} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/org/list.py b/kibble/api/pages/org/list.py index d21ef01b..0949f7de 100644 --- a/kibble/api/pages/org/list.py +++ b/kibble/api/pages/org/list.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -89,9 +88,6 @@ ######################################################################## - - - """ This is the Org list renderer for Kibble """ @@ -99,82 +95,79 @@ import json import time + def run(API, environ, indata, session): now = time.time() # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint!") - method = environ['REQUEST_METHOD'] + method = environ["REQUEST_METHOD"] # Are we making a new org? if method == "PUT": - if session.user['userlevel'] == "admin": - orgname = indata.get('name', 'Foo') - orgdesc = indata.get('desc', '') - orgid = indata.get('id', str(int(time.time()))) - if session.DB.ES.exists(index=session.DB.dbname, doc_type='organisation', id = orgid): + if session.user["userlevel"] == "admin": + orgname = indata.get("name", "Foo") + orgdesc = indata.get("desc", "") + orgid = indata.get("id", str(int(time.time()))) + if session.DB.ES.exists( + index=session.DB.dbname, doc_type="organisation", id=orgid + ): raise API.exception(403, "Organisation ID already in use!") - doc = { - 'id': orgid, - 'name': orgname, - 'description': orgdesc, - 'admins': [] - } - session.DB.ES.index(index=session.DB.dbname, doc_type='organisation', id = orgid, body = doc) + doc = {"id": orgid, "name": orgname, "description": orgdesc, "admins": []} + session.DB.ES.index( + index=session.DB.dbname, doc_type="organisation", id=orgid, body=doc + ) time.sleep(1.5) yield json.dumps({"okay": True, "message": "Organisation created!"}) return else: - raise API.exception(403, "Only administrators can create new organisations.") + raise API.exception( + 403, "Only administrators can create new organisations." 
+ ) #################################################################### orgs = [] - if session.user['userlevel'] == "admin": + if session.user["userlevel"] == "admin": res = session.DB.ES.search( index=session.DB.dbname, doc_type="organisation", - body = {'query': { 'match_all': {}}} + body={"query": {"match_all": {}}}, ) - for doc in res['hits']['hits']: - orgID = doc['_source']['id'] + for doc in res["hits"]["hits"]: + orgID = doc["_source"]["id"] numDocs = session.DB.ES.count( index=session.DB.dbname, - body = {'query': { 'term': {'organisation': orgID}}} - )['count'] + body={"query": {"term": {"organisation": orgID}}}, + )["count"] numSources = session.DB.ES.count( index=session.DB.dbname, doc_type="source", - body = {'query': { 'term': {'organisation': orgID}}} - )['count'] - doc['_source']['sourceCount'] = numSources - doc['_source']['docCount'] = numDocs - orgs.append(doc['_source']) + body={"query": {"term": {"organisation": orgID}}}, + )["count"] + doc["_source"]["sourceCount"] = numSources + doc["_source"]["docCount"] = numDocs + orgs.append(doc["_source"]) else: res = session.DB.ES.search( index=session.DB.dbname, doc_type="organisation", - body = {'query': { 'terms': {'id': session.user['organisations']}}} + body={"query": {"terms": {"id": session.user["organisations"]}}}, ) - for doc in res['hits']['hits']: - orgID = doc['_source']['id'] + for doc in res["hits"]["hits"]: + orgID = doc["_source"]["id"] numDocs = session.DB.ES.count( index=session.DB.dbname, - body = {'query': { 'term': {'organisation': orgID}}} - )['count'] + body={"query": {"term": {"organisation": orgID}}}, + )["count"] numSources = session.DB.ES.count( index=session.DB.dbname, doc_type="source", - body = {'query': { 'term': {'organisation': orgID}}} - )['count'] - doc['_source']['sourceCount'] = numSources - doc['_source']['docCount'] = numDocs - orgs.append(doc['_source']) - + body={"query": {"term": {"organisation": orgID}}}, + )["count"] + doc["_source"]["sourceCount"] = numSources + doc["_source"]["docCount"] = numDocs + orgs.append(doc["_source"]) - JSON_OUT = { - 'organisations': orgs, - 'okay': True, - 'responseTime': time.time() - now - } + JSON_OUT = {"organisations": orgs, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/org/members.py b/kibble/api/pages/org/members.py index 71e4be38..79641b64 100644 --- a/kibble/api/pages/org/members.py +++ b/kibble/api/pages/org/members.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information @@ -112,9 +111,6 @@ ######################################################################## - - - """ This is the Org list renderer for Kibble """ @@ -123,15 +119,18 @@ import time import hashlib + def canInvite(session): """ Determine if the user can edit sources in this org """ - if session.user['userlevel'] == 'admin': + if session.user["userlevel"] == "admin": return True - dOrg = session.user['defaultOrganisation'] or "apache" - if session.DB.ES.exists(index=session.DB.dbname, doc_type="organisation", id= dOrg): - xorg = session.DB.ES.get(index=session.DB.dbname, doc_type="organisation", id= dOrg)['_source'] - if session.user['email'] in xorg['admins']: + dOrg = session.user["defaultOrganisation"] or "apache" + if session.DB.ES.exists(index=session.DB.dbname, doc_type="organisation", id=dOrg): + xorg = session.DB.ES.get( + index=session.DB.dbname, doc_type="organisation", id=dOrg + )["_source"] + if session.user["email"] in xorg["admins"]: return True @@ -141,102 +140,153 @@ def run(API, environ, indata, session): if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint!") - method = environ['REQUEST_METHOD'] + method = environ["REQUEST_METHOD"] ################################################# # Inviting a new member? # ################################################# if method == "PUT": if canInvite(session): - newmember = indata.get('email') - isadmin = indata.get('admin', False) - orgid = session.user['defaultOrganisation'] or "apache" + newmember = indata.get("email") + isadmin = indata.get("admin", False) + orgid = session.user["defaultOrganisation"] or "apache" # Make sure the org exists - if not session.DB.ES.exists(index=session.DB.dbname, doc_type='organisation', id = orgid): + if not session.DB.ES.exists( + index=session.DB.dbname, doc_type="organisation", id=orgid + ): raise API.exception(403, "No such organisation!") # make sure the user account exists - if not session.DB.ES.exists(index=session.DB.dbname, doc_type='useraccount', id = newmember): + if not session.DB.ES.exists( + index=session.DB.dbname, doc_type="useraccount", id=newmember + ): raise API.exception(403, "No such user!") # Modify user account - doc = session.DB.ES.get(index=session.DB.dbname, doc_type='useraccount', id = newmember) - if orgid not in doc['_source']['organisations']: # No duplicates, please - doc['_source']['organisations'].append(orgid) - session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = newmember, body = doc['_source']) - + doc = session.DB.ES.get( + index=session.DB.dbname, doc_type="useraccount", id=newmember + ) + if orgid not in doc["_source"]["organisations"]: # No duplicates, please + doc["_source"]["organisations"].append(orgid) + session.DB.ES.index( + index=session.DB.dbname, + doc_type="useraccount", + id=newmember, + body=doc["_source"], + ) # Get org doc from ES - doc = session.DB.ES.get(index=session.DB.dbname, doc_type='organisation', id = orgid) + doc = session.DB.ES.get( + index=session.DB.dbname, doc_type="organisation", id=orgid + ) if isadmin: - if newmember not in doc['_source']['admins']: - doc['_source']['admins'].append(newmember) + if newmember not in doc["_source"]["admins"]: + doc["_source"]["admins"].append(newmember) # Override old doc - session.DB.ES.index(index=session.DB.dbname, doc_type='organisation', id = orgid, body = doc['_source']) - time.sleep(1) # Bleh!! 
+ session.DB.ES.index( + index=session.DB.dbname, + doc_type="organisation", + id=orgid, + body=doc["_source"], + ) + time.sleep(1) # Bleh!! # If an admin, and not us, and reinvited, we purge the admin bit - elif newmember in doc['_source']['admins']: - if newmember == session.user['email']: - raise API.exception(403, "You can't remove yourself from an organisation.") - doc['_source']['admins'].remove(newmember) + elif newmember in doc["_source"]["admins"]: + if newmember == session.user["email"]: + raise API.exception( + 403, "You can't remove yourself from an organisation." + ) + doc["_source"]["admins"].remove(newmember) # Override old doc - session.DB.ES.index(index=session.DB.dbname, doc_type='organisation', id = orgid, body = doc['_source']) - time.sleep(1) # Bleh!! + session.DB.ES.index( + index=session.DB.dbname, + doc_type="organisation", + id=orgid, + body=doc["_source"], + ) + time.sleep(1) # Bleh!! yield json.dumps({"okay": True, "message": "Member invited!!"}) return else: - raise API.exception(403, "Only administrators or organisation owners can invite new members.") + raise API.exception( + 403, + "Only administrators or organisation owners can invite new members.", + ) ################################################# # DELETE: Remove a member # ################################################# if method == "DELETE": if canInvite(session): - memberid = indata.get('email') - isadmin = indata.get('admin', False) - orgid = session.user['defaultOrganisation'] or "apache" + memberid = indata.get("email") + isadmin = indata.get("admin", False) + orgid = session.user["defaultOrganisation"] or "apache" # We can't remove ourselves! - if memberid == session.user['email']: - raise API.exception(403, "You can't remove yourself from an organisation.") + if memberid == session.user["email"]: + raise API.exception( + 403, "You can't remove yourself from an organisation." 
+ ) # Make sure the org exists - if not session.DB.ES.exists(index=session.DB.dbname, doc_type='organisation', id = orgid): + if not session.DB.ES.exists( + index=session.DB.dbname, doc_type="organisation", id=orgid + ): raise API.exception(403, "No such organisation!") # make sure the user account exists - if not session.DB.ES.exists(index=session.DB.dbname, doc_type='useraccount', id = memberid): + if not session.DB.ES.exists( + index=session.DB.dbname, doc_type="useraccount", id=memberid + ): raise API.exception(403, "No such user!") # Modify user account - doc = session.DB.ES.get(index=session.DB.dbname, doc_type='useraccount', id = memberid) - if orgid in doc['_source']['organisations']: # No duplicates, please - doc['_source']['organisations'].remove(orgid) - session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = memberid, body = doc['_source']) + doc = session.DB.ES.get( + index=session.DB.dbname, doc_type="useraccount", id=memberid + ) + if orgid in doc["_source"]["organisations"]: # No duplicates, please + doc["_source"]["organisations"].remove(orgid) + session.DB.ES.index( + index=session.DB.dbname, + doc_type="useraccount", + id=memberid, + body=doc["_source"], + ) # Check is user is admin and remove if so # Get org doc from ES - doc = session.DB.ES.get(index=session.DB.dbname, doc_type='organisation', id = orgid) - if memberid in doc['_source']['admins']: - doc['_source']['admins'].remove(memberid) + doc = session.DB.ES.get( + index=session.DB.dbname, doc_type="organisation", id=orgid + ) + if memberid in doc["_source"]["admins"]: + doc["_source"]["admins"].remove(memberid) # Override old doc - session.DB.ES.index(index=session.DB.dbname, doc_type='organisation', id = orgid, body = doc['_source']) - time.sleep(1) # Bleh!! + session.DB.ES.index( + index=session.DB.dbname, + doc_type="organisation", + id=orgid, + body=doc["_source"], + ) + time.sleep(1) # Bleh!! yield json.dumps({"okay": True, "message": "Member removed!"}) return else: - raise API.exception(403, "Only administrators or organisation owners can invite new members.") - + raise API.exception( + 403, + "Only administrators or organisation owners can invite new members.", + ) ################################################# # GET/POST: Display members # ################################################# if method in ["GET", "POST"]: - orgid = session.user['defaultOrganisation'] or "apache" - if not session.DB.ES.exists(index=session.DB.dbname, doc_type='organisation', id = orgid): + orgid = session.user["defaultOrganisation"] or "apache" + if not session.DB.ES.exists( + index=session.DB.dbname, doc_type="organisation", id=orgid + ): raise API.exception(403, "No such organisation!") # Only admins should be able to view this! @@ -244,35 +294,25 @@ def run(API, environ, indata, session): raise API.exception(403, "Only organisation owners can view this list.") # Find everyone affiliated with this org - query = { - 'query': { - 'bool': { - 'must': [ - { - 'term': { - 'organisations': orgid - } - } - ] - } - } - } + query = {"query": {"bool": {"must": [{"term": {"organisations": orgid}}]}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="useraccount", - size = 5000, # TO-DO: make this a scroll?? - body = query - ) + index=session.DB.dbname, + doc_type="useraccount", + size=5000, # TO-DO: make this a scroll?? 
+ body=query, + ) members = [] - for doc in res['hits']['hits']: - members.append(doc['_id']) + for doc in res["hits"]["hits"]: + members.append(doc["_id"]) # Get org doc from ES - doc = session.DB.ES.get(index=session.DB.dbname, doc_type='organisation', id = orgid) + doc = session.DB.ES.get( + index=session.DB.dbname, doc_type="organisation", id=orgid + ) JSON_OUT = { - 'members': members, - 'admins': doc['_source']['admins'], - 'okay': True, - 'responseTime': time.time() - now + "members": members, + "admins": doc["_source"]["admins"], + "okay": True, + "responseTime": time.time() - now, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/org/sourcetypes.py b/kibble/api/pages/org/sourcetypes.py index 005f3c87..73c45701 100644 --- a/kibble/api/pages/org/sourcetypes.py +++ b/kibble/api/pages/org/sourcetypes.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the source types handler for Kibble """ diff --git a/kibble/api/pages/org/trends.py b/kibble/api/pages/org/trends.py index db2c0586..c34bdd3c 100644 --- a/kibble/api/pages/org/trends.py +++ b/kibble/api/pages/org/trends.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -62,9 +61,6 @@ ######################################################################## - - - """ This is the org trend renderer for Kibble """ @@ -72,6 +68,7 @@ import json import time + def run(API, environ, indata, session): # We need to be logged in for this! @@ -82,141 +79,91 @@ def run(API, environ, indata, session): # First, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - if session.DB.ES.exists(index=session.DB.dbname, doc_type="view", id = indata['view']): - view = session.DB.ES.get(index=session.DB.dbname, doc_type="view", id = indata['view']) - viewList = view['_source']['sourceList'] + if indata.get("view"): + if session.DB.ES.exists( + index=session.DB.dbname, doc_type="view", id=indata["view"] + ): + view = session.DB.ES.get( + index=session.DB.dbname, doc_type="view", id=indata["view"] + ) + viewList = view["_source"]["sourceList"] dateTo = int(time.time()) - dateFrom = dateTo - (86400*30*3) # Default to a quarter + dateFrom = dateTo - (86400 * 30 * 3) # Default to a quarter if dateFrom < 0: dateFrom = 0 dateYonder = dateFrom - (dateTo - dateFrom) - - #################################################################### # We start by doing all the queries for THIS period. # # Then we reset the query, and change date to yonder-->from # # and rerun the same queries. 
# #################################################################### - dOrg = session.user['defaultOrganisation'] or "kibbledemo" + dOrg = session.user["defaultOrganisation"] or "kibbledemo" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateFrom, - 'to': dateTo - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateFrom, "to": dateTo}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Get number of commits, this period res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) - no_commits = res['count'] - + index=session.DB.dbname, doc_type="code_commit", body=query + ) + no_commits = res["count"] # Get number of committers, this period - query['aggs'] = { - 'authors': { - 'cardinality': { - 'field': 'author_email' - } - } - - } + query["aggs"] = {"authors": {"cardinality": {"field": "author_email"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - no_authors = res['aggregations']['authors']['value'] - + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) + no_authors = res["aggregations"]["authors"]["value"] #################################################################### # Change to PRIOR SPAN # #################################################################### - dOrg = session.user['defaultOrganisation'] or "apache" + dOrg = session.user["defaultOrganisation"] or "apache" query = { - 'query': { - 'bool': { - 'must': [ - {'range': - { - 'tsday': { - 'from': dateYonder, - 'to': dateFrom-1 - } - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } - } + "query": { + "bool": { + "must": [ + {"range": {"tsday": {"from": dateYonder, "to": dateFrom - 1}}}, + {"term": {"organisation": dOrg}}, + ] } + } + } # Get number of commits, this period res = session.DB.ES.count( - index=session.DB.dbname, - doc_type="code_commit", - body = query - ) - no_commits_before = res['count'] + index=session.DB.dbname, doc_type="code_commit", body=query + ) + no_commits_before = res["count"] # Get number of committers, this period - query['aggs'] = { - 'authors': { - 'cardinality': { - 'field': 'author_email' - } - } - } + query["aggs"] = {"authors": {"cardinality": {"field": "author_email"}}} res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="code_commit", - size = 0, - body = query - ) - no_authors_before = res['aggregations']['authors']['value'] - + index=session.DB.dbname, doc_type="code_commit", size=0, body=query + ) + no_authors_before = res["aggregations"]["authors"]["value"] trends = { "authors": { - 'before': no_authors_before, - 'after': no_authors, - 'title': "Contributors this quarter" + "before": no_authors_before, + "after": no_authors, + "title": "Contributors this quarter", + }, + "commits": { + "before": no_commits_before, + "after": no_commits, + "title": "Commits this quarter", }, - 'commits': { - 'before': no_commits_before, - 'after': no_commits, - 'title': "Commits this quarter" - } } - JSON_OUT = { - 'trends': trends, - 'okay': True, - 'responseTime': time.time() - now - } + JSON_OUT = {"trends": trends, "okay": True, "responseTime": time.time() - now} yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/session.py b/kibble/api/pages/session.py index eebd9136..225927bf 100644 --- a/kibble/api/pages/session.py +++ b/kibble/api/pages/session.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software 
Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -91,9 +90,6 @@ ######################################################################## - - - """ This is the user session handler for Kibble """ @@ -105,42 +101,57 @@ import hashlib import uuid + def run(API, environ, indata, session): - method = environ['REQUEST_METHOD'] + method = environ["REQUEST_METHOD"] # Logging in? if method == "PUT": - u = indata['email'] - p = indata['password'] - if session.DB.ES.exists(index=session.DB.dbname, doc_type='useraccount', id = u): - doc = session.DB.ES.get(index=session.DB.dbname, doc_type='useraccount', id = u) - hp = doc['_source']['password'] - if bcrypt.hashpw(p.encode('utf-8'), hp.encode('utf-8')).decode('ascii') == hp: + u = indata["email"] + p = indata["password"] + if session.DB.ES.exists(index=session.DB.dbname, doc_type="useraccount", id=u): + doc = session.DB.ES.get( + index=session.DB.dbname, doc_type="useraccount", id=u + ) + hp = doc["_source"]["password"] + if ( + bcrypt.hashpw(p.encode("utf-8"), hp.encode("utf-8")).decode("ascii") + == hp + ): # If verification is enabled, make sure account is verified - if session.config['accounts'].get('verify'): - if doc['_source']['verified'] == False: - raise API.exception(403, "Your account needs to be verified first. Check your inbox!") + if session.config["accounts"].get("verify"): + if doc["_source"]["verified"] == False: + raise API.exception( + 403, + "Your account needs to be verified first. Check your inbox!", + ) sessionDoc = { - 'cid': u, - 'id': session.cookie, - 'timestamp': int(time.time()) + "cid": u, + "id": session.cookie, + "timestamp": int(time.time()), } - session.DB.ES.index(index=session.DB.dbname, doc_type='uisession', id = session.cookie, body = sessionDoc) + session.DB.ES.index( + index=session.DB.dbname, + doc_type="uisession", + id=session.cookie, + body=sessionDoc, + ) yield json.dumps({"message": "Logged in OK!"}) return # Fall back to a 403 if username and password did not match raise API.exception(403, "Wrong username or password supplied!") - # We need to be logged in for the rest of this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") # Delete a session (log out) if method == "DELETE": - session.DB.ES.delete(index=session.DB.dbname, doc_type='uisession', id = session.cookie) + session.DB.ES.delete( + index=session.DB.dbname, doc_type="uisession", id=session.cookie + ) session.newCookie() yield json.dumps({"message": "Logged out, bye bye!"}) @@ -148,37 +159,38 @@ def run(API, environ, indata, session): if method == "GET": # Do we have an API key? If not, make one - if not session.user.get('token') or indata.get('newtoken'): + if not session.user.get("token") or indata.get("newtoken"): token = str(uuid.uuid4()) - session.user['token'] = token - session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = session.user['email'], body = session.user) + session.user["token"] = token + session.DB.ES.index( + index=session.DB.dbname, + doc_type="useraccount", + id=session.user["email"], + body=session.user, + ) # Run a quick search of all orgs we have. 
res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="organisation", - size = 100, - body = { - 'query': { - 'match_all': {} - } - } - ) + index=session.DB.dbname, + doc_type="organisation", + size=100, + body={"query": {"match_all": {}}}, + ) orgs = [] - for hit in res['hits']['hits']: - doc = hit['_source'] + for hit in res["hits"]["hits"]: + doc = hit["_source"] orgs.append(doc) JSON_OUT = { - 'email': session.user['email'], - 'displayName': session.user['displayName'], - 'defaultOrganisation': session.user['defaultOrganisation'], - 'organisations': session.user['organisations'], - 'ownerships': session.user['ownerships'], - 'gravatar': hashlib.md5(session.user['email'].encode('utf-8')).hexdigest(), - 'userlevel': session.user['userlevel'], - 'token': session.user['token'] + "email": session.user["email"], + "displayName": session.user["displayName"], + "defaultOrganisation": session.user["defaultOrganisation"], + "organisations": session.user["organisations"], + "ownerships": session.user["ownerships"], + "gravatar": hashlib.md5(session.user["email"].encode("utf-8")).hexdigest(), + "userlevel": session.user["userlevel"], + "token": session.user["token"], } yield json.dumps(JSON_OUT) return diff --git a/kibble/api/pages/sources.py b/kibble/api/pages/sources.py index 49f9fec6..0b737ee1 100644 --- a/kibble/api/pages/sources.py +++ b/kibble/api/pages/sources.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -120,9 +119,6 @@ ######################################################################## - - - """ This is the source list handler for Kibble """ @@ -140,86 +136,88 @@ def canModifySource(session): """ Determine if the user can edit sources in this org """ - dOrg = session.user['defaultOrganisation'] or "apache" - if session.DB.ES.exists(index=session.DB.dbname, doc_type="organisation", id= dOrg): - xorg = session.DB.ES.get(index=session.DB.dbname, doc_type="organisation", id= dOrg)['_source'] - if session.user['email'] in xorg['admins']: + dOrg = session.user["defaultOrganisation"] or "apache" + if session.DB.ES.exists(index=session.DB.dbname, doc_type="organisation", id=dOrg): + xorg = session.DB.ES.get( + index=session.DB.dbname, doc_type="organisation", id=dOrg + )["_source"] + if session.user["email"] in xorg["admins"]: return True - if session.user['userlevel'] == 'admin': + if session.user["userlevel"] == "admin": return True return False + def run(API, environ, indata, session): # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") - method = environ['REQUEST_METHOD'] - dOrg = session.user['defaultOrganisation'] + method = environ["REQUEST_METHOD"] + dOrg = session.user["defaultOrganisation"] - if method in ['GET', 'POST']: + if method in ["GET", "POST"]: # Fetch organisation data # Make sure we have a default/current org set - if 'defaultOrganisation' not in session.user or not session.user['defaultOrganisation']: - raise API.exception(400, "You must specify an organisation as default/current in order to add sources.") + if ( + "defaultOrganisation" not in session.user + or not session.user["defaultOrganisation"] + ): + raise API.exception( + 400, + "You must specify an organisation as default/current in order to add sources.", + ) - if session.DB.ES.exists(index=session.DB.dbname, doc_type="organisation", id= dOrg): - org = session.DB.ES.get(index=session.DB.dbname, doc_type="organisation", id= dOrg)['_source'] - del org['admins'] + if session.DB.ES.exists( + index=session.DB.dbname, doc_type="organisation", id=dOrg + ): + org = session.DB.ES.get( + index=session.DB.dbname, doc_type="organisation", id=dOrg + )["_source"] + del org["admins"] else: raise API.exception(404, "No such organisation, '%s'" % (dOrg or "(None)")) - sourceTypes = indata.get('types', []) + sourceTypes = indata.get("types", []) # Fetch all sources for default org res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="source", - size = 5000, - body = { - 'query': { - 'term': { - 'organisation': dOrg - } - } - } - ) + index=session.DB.dbname, + doc_type="source", + size=5000, + body={"query": {"term": {"organisation": dOrg}}}, + ) # Secondly, fetch the view if we have such a thing enabled viewList = [] - if indata.get('view'): - viewList = session.getView(indata.get('view')) - if indata.get('subfilter') and indata.get('quick'): - viewList = session.subFilter(indata.get('subfilter'), view = viewList) - + if indata.get("view"): + viewList = session.getView(indata.get("view")) + if indata.get("subfilter") and indata.get("quick"): + viewList = session.subFilter(indata.get("subfilter"), view=viewList) sources = [] - for hit in res['hits']['hits']: - doc = hit['_source'] - if viewList and not doc['sourceID'] in viewList: + for hit in res["hits"]["hits"]: + doc = hit["_source"] + if viewList and not doc["sourceID"] in viewList: continue - if sourceTypes and not doc['type'] in sourceTypes: + if sourceTypes and not doc["type"] in sourceTypes: continue - if indata.get('quick'): + if indata.get("quick"): xdoc = { - 'sourceID': doc['sourceID'], - 'type': doc['type'], - 'sourceURL': doc['sourceURL'] - } + "sourceID": doc["sourceID"], + "type": doc["type"], + "sourceURL": doc["sourceURL"], + } sources.append(xdoc) else: # Creds should be anonymous here - if 'creds' in doc: - del doc['creds'] + if "creds" in doc: + del doc["creds"] sources.append(doc) - JSON_OUT = { - 'sources': sources, - 'okay': True, - 'organisation': org - } + JSON_OUT = {"sources": sources, "okay": True, "organisation": org} yield json.dumps(JSON_OUT) return @@ -230,56 +228,76 @@ def run(API, environ, indata, session): old = 0 with open(os.path.join(YAML_DIRECTORY, "sourcetypes.yaml")) as f: stypes = yaml.load(f) - for source in indata.get('sources', []): - sourceURL = source['sourceURL'] - sourceType = source['type'] + for source in indata.get("sources", []): + sourceURL = source["sourceURL"] + sourceType = source["type"] creds = {} if not sourceType in stypes: raise API.exception(400, "Attempt to add unknown source type!") - if 'optauth' in stypes[sourceType]: - for el 
in stypes[sourceType]['optauth']: + if "optauth" in stypes[sourceType]: + for el in stypes[sourceType]["optauth"]: if el in source and len(source[el]) > 0: creds[el] = source[el] - sourceID = hashlib.sha224( ("%s-%s" % (sourceType, sourceURL)).encode('utf-8') ).hexdigest() + sourceID = hashlib.sha224( + ("%s-%s" % (sourceType, sourceURL)).encode("utf-8") + ).hexdigest() # Make sure we have a default/current org set - if 'defaultOrganisation' not in session.user or not session.user['defaultOrganisation']: - raise API.exception(400, "You must first specify an organisation as default/current in order to add sources.") + if ( + "defaultOrganisation" not in session.user + or not session.user["defaultOrganisation"] + ): + raise API.exception( + 400, + "You must first specify an organisation as default/current in order to add sources.", + ) doc = { - 'organisation': dOrg, - 'sourceURL': sourceURL, - 'sourceID': sourceID, - 'type': sourceType, - 'creds': creds, - 'steps': {} + "organisation": dOrg, + "sourceURL": sourceURL, + "sourceID": sourceID, + "type": sourceType, + "creds": creds, + "steps": {}, } - if session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = sourceID): + if session.DB.ES.exists( + index=session.DB.dbname, doc_type="source", id=sourceID + ): old += 1 else: new += 1 - session.DB.ES.index(index=session.DB.dbname, doc_type="source", id = sourceID, body = doc) - yield json.dumps({ - "message": "Sources added/updated", - "added": new, - "updated": old - }) + session.DB.ES.index( + index=session.DB.dbname, doc_type="source", id=sourceID, body=doc + ) + yield json.dumps( + {"message": "Sources added/updated", "added": new, "updated": old} + ) else: - raise API.exception(403, "You don't have permission to add sources to this organisation.") + raise API.exception( + 403, "You don't have permission to add sources to this organisation." + ) # Delete a source if method == "DELETE": if canModifySource(session): - sourceID = indata.get('id') - if session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = sourceID): + sourceID = indata.get("id") + if session.DB.ES.exists( + index=session.DB.dbname, doc_type="source", id=sourceID + ): # Delete all data pertainig to this source # For ES >= 6.x, use a glob for removing from all indices if session.DB.ESversion > 5: - session.DB.ES.delete_by_query(index=session.DB.dbname+'_*', body = {'query': {'match': {'sourceID': sourceID}}}) + session.DB.ES.delete_by_query( + index=session.DB.dbname + "_*", + body={"query": {"match": {"sourceID": sourceID}}}, + ) else: - # For ES <= 5.x, just remove from the main index - session.DB.ES.delete_by_query(index=session.DB.dbname, body = {'query': {'match': {'sourceID': sourceID}}}) - yield json.dumps({'message': "Source deleted"}) + # For ES <= 5.x, just remove from the main index + session.DB.ES.delete_by_query( + index=session.DB.dbname, + body={"query": {"match": {"sourceID": sourceID}}}, + ) + yield json.dumps({"message": "Source deleted"}) else: raise API.exception(404, "No such source item") else: diff --git a/kibble/api/pages/verify.py b/kibble/api/pages/verify.py index 3ff61f7c..bc4f3167 100644 --- a/kibble/api/pages/verify.py +++ b/kibble/api/pages/verify.py @@ -50,9 +50,6 @@ ######################################################################## - - - """ This is the user account verifier for Kibble. 
""" @@ -61,24 +58,33 @@ def run(API, environ, indata, session): # Get vocde, make sure it's 40 chars - vcode = indata.get('vcode') + vcode = indata.get("vcode") if len(vcode) != 40: raise API.exception(400, "Invalid verification code!") # Find the account with this vcode - email = indata.get('email') + email = indata.get("email") if len(email) < 7: raise API.exception(400, "Invalid email address presented.") - if session.DB.ES.exists(index=session.DB.dbname, doc_type='useraccount', id = email): - doc = session.DB.ES.get(index=session.DB.dbname, doc_type='useraccount', id = email) + if session.DB.ES.exists(index=session.DB.dbname, doc_type="useraccount", id=email): + doc = session.DB.ES.get( + index=session.DB.dbname, doc_type="useraccount", id=email + ) # Do the codes match?? - if doc['_source']['vcode'] == vcode: - doc['_source']['verified'] = True + if doc["_source"]["vcode"] == vcode: + doc["_source"]["verified"] = True # Save account as verified - session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = email, body = doc['_source']) - yield("Your account has been verified, you can now log in!") + session.DB.ES.index( + index=session.DB.dbname, + doc_type="useraccount", + id=email, + body=doc["_source"], + ) + yield ("Your account has been verified, you can now log in!") else: raise API.exception(404, "Invalid verification code presented!") else: - raise API.exception(404, "Invalid verification code presented!") # Don't give away if such a user exists, pssst + raise API.exception( + 404, "Invalid verification code presented!" + ) # Don't give away if such a user exists, pssst diff --git a/kibble/api/pages/views.py b/kibble/api/pages/views.py index 2fd1a501..a70607bb 100644 --- a/kibble/api/pages/views.py +++ b/kibble/api/pages/views.py @@ -1,4 +1,3 @@ - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -134,9 +133,6 @@ ######################################################################## - - - """ This is the views (filters) list handler for Kibble """ @@ -146,163 +142,179 @@ import time import hashlib + def run(API, environ, indata, session): # We need to be logged in for this! if not session.user: raise API.exception(403, "You must be logged in to use this API endpoint! %s") - method = environ['REQUEST_METHOD'] - dOrg = session.user['defaultOrganisation'] or "apache" + method = environ["REQUEST_METHOD"] + dOrg = session.user["defaultOrganisation"] or "apache" # Are we adding a view? - if method == 'PUT': - viewID = hashlib.sha224( ("%s-%s-%s" % (time.time(), session.user['email'], dOrg) ).encode('utf-8') ).hexdigest() - sources = indata.get('sources', []) - name = indata.get('name', "unknown view") - public = indata.get('public', False) + if method == "PUT": + viewID = hashlib.sha224( + ("%s-%s-%s" % (time.time(), session.user["email"], dOrg)).encode("utf-8") + ).hexdigest() + sources = indata.get("sources", []) + name = indata.get("name", "unknown view") + public = indata.get("public", False) if public: - if not (session.user['userlevel'] == 'admin' or dOrg in session.user['ownerships']): - raise API.exception(403, "Only owners of an organisation may create public views.") + if not ( + session.user["userlevel"] == "admin" + or dOrg in session.user["ownerships"] + ): + raise API.exception( + 403, "Only owners of an organisation may create public views." 
+ ) doc = { - 'id': viewID, - 'email': session.user['email'], - 'organisation': dOrg, - 'sourceList': sources, - 'name': name, - 'created': int(time.time()), - 'publicView': public + "id": viewID, + "email": session.user["email"], + "organisation": dOrg, + "sourceList": sources, + "name": name, + "created": int(time.time()), + "publicView": public, } - session.DB.ES.index(index=session.DB.dbname, doc_type="view", id = viewID, body = doc) - yield json.dumps({'okay': True, 'message': "View created"}) + session.DB.ES.index( + index=session.DB.dbname, doc_type="view", id=viewID, body=doc + ) + yield json.dumps({"okay": True, "message": "View created"}) # Are we editing (patching) a view? - if method == 'PATCH': - viewID = indata.get('id') - if viewID and session.DB.ES.exists(index=session.DB.dbname, doc_type="view", id = viewID): - doc = session.DB.ES.get(index=session.DB.dbname, doc_type="view", id = viewID) - if session.user['userlevel'] == 'admin' or doc['_source']['email'] == session.user['email']: - sources = indata.get('sources', []) - doc['_source']['sourceList'] = sources - session.DB.ES.index(index=session.DB.dbname, doc_type="view", id = viewID, body = doc['_source']) - yield json.dumps({'okay': True, 'message': "View updated"}) + if method == "PATCH": + viewID = indata.get("id") + if viewID and session.DB.ES.exists( + index=session.DB.dbname, doc_type="view", id=viewID + ): + doc = session.DB.ES.get(index=session.DB.dbname, doc_type="view", id=viewID) + if ( + session.user["userlevel"] == "admin" + or doc["_source"]["email"] == session.user["email"] + ): + sources = indata.get("sources", []) + doc["_source"]["sourceList"] = sources + session.DB.ES.index( + index=session.DB.dbname, + doc_type="view", + id=viewID, + body=doc["_source"], + ) + yield json.dumps({"okay": True, "message": "View updated"}) else: raise API.exception(403, "You don't own this view, and cannot edit it.") else: raise API.exception(404, "We couldn't find a view with this ID.") # Removing a view? - if method == 'DELETE': - viewID = indata.get('id') - if viewID and session.DB.ES.exists(index=session.DB.dbname, doc_type="view", id = viewID): - doc = session.DB.ES.get(index=session.DB.dbname, doc_type="view", id = viewID) - if session.user['userlevel'] == 'admin' or doc['_source']['email'] == session.user['email']: - session.DB.ES.delete(index=session.DB.dbname, doc_type="view", id = viewID) - yield json.dumps({'okay': True, 'message': "View deleted"}) + if method == "DELETE": + viewID = indata.get("id") + if viewID and session.DB.ES.exists( + index=session.DB.dbname, doc_type="view", id=viewID + ): + doc = session.DB.ES.get(index=session.DB.dbname, doc_type="view", id=viewID) + if ( + session.user["userlevel"] == "admin" + or doc["_source"]["email"] == session.user["email"] + ): + session.DB.ES.delete( + index=session.DB.dbname, doc_type="view", id=viewID + ) + yield json.dumps({"okay": True, "message": "View deleted"}) else: - raise API.exception(403, "You don't own this view, and cannot delete it.") + raise API.exception( + 403, "You don't own this view, and cannot delete it." 
+ ) else: raise API.exception(404, "We couldn't find a view with this ID.") - - if method in ['GET', 'POST']: + if method in ["GET", "POST"]: # Fetch all views for default org res = session.DB.ES.search( - index=session.DB.dbname, - doc_type="view", - size = 5000, - body = { - 'query': { - 'term': { - 'email': session.user['email'] - } - } - } - ) - + index=session.DB.dbname, + doc_type="view", + size=5000, + body={"query": {"term": {"email": session.user["email"]}}}, + ) # Are we looking at someone elses view? - if indata.get('view'): - viewID = indata.get('view') - if session.DB.ES.exists(index=session.DB.dbname, doc_type="view", id = viewID): - blob = session.DB.ES.get(index=session.DB.dbname, doc_type="view", id = viewID) - if blob['_source']['email'] != session.user['email'] and not blob['_source']['publicView']: - blob['_source']['name'] += " (shared by " + blob['_source']['email'] + ")" - res['hits']['hits'].append(blob) + if indata.get("view"): + viewID = indata.get("view") + if session.DB.ES.exists( + index=session.DB.dbname, doc_type="view", id=viewID + ): + blob = session.DB.ES.get( + index=session.DB.dbname, doc_type="view", id=viewID + ) + if ( + blob["_source"]["email"] != session.user["email"] + and not blob["_source"]["publicView"] + ): + blob["_source"]["name"] += ( + " (shared by " + blob["_source"]["email"] + ")" + ) + res["hits"]["hits"].append(blob) sources = [] # Include public views?? - if not indata.get('sources', False): + if not indata.get("sources", False): pres = session.DB.ES.search( - index=session.DB.dbname, - doc_type="view", - size = 5000, - body = { - 'query': { - 'bool': { - 'must': [ - {'term': - { - 'publicView': True - } - }, - { - 'term': { - 'organisation': dOrg - } - } - ] - } + index=session.DB.dbname, + doc_type="view", + size=5000, + body={ + "query": { + "bool": { + "must": [ + {"term": {"publicView": True}}, + {"term": {"organisation": dOrg}}, + ] } } - ) - for hit in pres['hits']['hits']: - if hit['_source']['email'] != session.user['email']: - hit['_source']['name'] += " (shared view)" - res['hits']['hits'].append(hit) + }, + ) + for hit in pres["hits"]["hits"]: + if hit["_source"]["email"] != session.user["email"]: + hit["_source"]["name"] += " (shared view)" + res["hits"]["hits"].append(hit) - for hit in res['hits']['hits']: - doc = hit['_source'] - if doc['organisation'] != dOrg: + for hit in res["hits"]["hits"]: + doc = hit["_source"] + if doc["organisation"] != dOrg: continue - if indata.get('quick'): + if indata.get("quick"): xdoc = { - 'id': doc['id'], - 'name': doc['name'], - 'organisation': doc['organisation'] - } + "id": doc["id"], + "name": doc["name"], + "organisation": doc["organisation"], + } sources.append(xdoc) else: sources.append(doc) allsources = [] - if indata.get('sources', False): + if indata.get("sources", False): res = session.DB.ES.search( index=session.DB.dbname, doc_type="source", - size = 5000, - body = { - 'query': { - 'term': { - 'organisation': dOrg - } - } - } + size=5000, + body={"query": {"term": {"organisation": dOrg}}}, ) - for zdoc in res['hits']['hits']: - doc = zdoc['_source'] + for zdoc in res["hits"]["hits"]: + doc = zdoc["_source"] xdoc = { - 'sourceID': doc['sourceID'], - 'type': doc['type'], - 'sourceURL': doc['sourceURL'] - } + "sourceID": doc["sourceID"], + "type": doc["type"], + "sourceURL": doc["sourceURL"], + } allsources.append(xdoc) JSON_OUT = { - 'views': sources, - 'sources': allsources, - 'okay': True, - 'organisation': dOrg + "views": sources, + "sources": allsources, + "okay": True, + 
"organisation": dOrg, } yield json.dumps(JSON_OUT) diff --git a/kibble/api/pages/widgets.py b/kibble/api/pages/widgets.py index 6b101e21..53f7ced8 100644 --- a/kibble/api/pages/widgets.py +++ b/kibble/api/pages/widgets.py @@ -62,10 +62,10 @@ def run(API, environ, indata, session): with open(os.path.join(YAML_DIRECTORY, "widgets.yaml")) as f: widgets = yaml.load(f) - page = indata['pageid'] - if not page or page == '0': - page = widgets.get('defaultWidget', 'repos') - if page in widgets['widgets']: - yield json.dumps(widgets['widgets'][page]) + page = indata["pageid"] + if not page or page == "0": + page = widgets.get("defaultWidget", "repos") + if page in widgets["widgets"]: + yield json.dumps(widgets["widgets"][page]) else: raise API.exception(404, "Widget design not found!") diff --git a/kibble/api/plugins/database.py b/kibble/api/plugins/database.py index c36001de..fb1a9fab 100644 --- a/kibble/api/plugins/database.py +++ b/kibble/api/plugins/database.py @@ -28,97 +28,123 @@ class KibbleESWrapper(object): Class for rewriting old-style queries to the new ones, where doc_type is an integral part of the DB name """ + def __init__(self, ES): self.ES = ES def get(self, index, doc_type, id): - return self.ES.get(index = index+'_'+doc_type, doc_type = '_doc', id = id) + return self.ES.get(index=index + "_" + doc_type, doc_type="_doc", id=id) + def exists(self, index, doc_type, id): - return self.ES.exists(index = index+'_'+doc_type, doc_type = '_doc', id = id) + return self.ES.exists(index=index + "_" + doc_type, doc_type="_doc", id=id) + def delete(self, index, doc_type, id): - return self.ES.delete(index = index+'_'+doc_type, doc_type = '_doc', id = id) + return self.ES.delete(index=index + "_" + doc_type, doc_type="_doc", id=id) + def index(self, index, doc_type, id, body): - return self.ES.index(index = index+'_'+doc_type, doc_type = '_doc', id = id, body = body) + return self.ES.index( + index=index + "_" + doc_type, doc_type="_doc", id=id, body=body + ) + def update(self, index, doc_type, id, body): - return self.ES.update(index = index+'_'+doc_type, doc_type = '_doc', id = id, body = body) + return self.ES.update( + index=index + "_" + doc_type, doc_type="_doc", id=id, body=body + ) + def scroll(self, scroll_id, scroll): - return self.ES.scroll(scroll_id = scroll_id, scroll = scroll) + return self.ES.scroll(scroll_id=scroll_id, scroll=scroll) + def delete_by_query(self, **kwargs): return self.ES.delete_by_query(**kwargs) - def search(self, index, doc_type, size = 100, scroll = None, _source_include = None, body = None): + + def search( + self, index, doc_type, size=100, scroll=None, _source_include=None, body=None + ): return self.ES.search( - index = index+'_'+doc_type, - doc_type = '_doc', - size = size, - scroll = scroll, - _source_include = _source_include, - body = body - ) - def count(self, index, doc_type = '*', body = None): - return self.ES.count( - index = index+'_'+doc_type, - doc_type = '_doc', - body = body - ) + index=index + "_" + doc_type, + doc_type="_doc", + size=size, + scroll=scroll, + _source_include=_source_include, + body=body, + ) + + def count(self, index, doc_type="*", body=None): + return self.ES.count(index=index + "_" + doc_type, doc_type="_doc", body=body) class KibbleESWrapperSeven(object): """ - Class for rewriting old-style queries to the >= 7.x ones, - where doc_type is an integral part of the DB name and NO DOC_TYPE! + Class for rewriting old-style queries to the >= 7.x ones, + where doc_type is an integral part of the DB name and NO DOC_TYPE! 
""" + def __init__(self, ES): self.ES = ES def get(self, index, doc_type, id): - return self.ES.get(index = index+'_'+doc_type, id = id) + return self.ES.get(index=index + "_" + doc_type, id=id) + def exists(self, index, doc_type, id): - return self.ES.exists(index = index+'_'+doc_type, id = id) + return self.ES.exists(index=index + "_" + doc_type, id=id) + def delete(self, index, doc_type, id): - return self.ES.delete(index = index+'_'+doc_type, id = id) + return self.ES.delete(index=index + "_" + doc_type, id=id) + def index(self, index, doc_type, id, body): - return self.ES.index(index = index+'_'+doc_type, id = id, body = body) + return self.ES.index(index=index + "_" + doc_type, id=id, body=body) + def update(self, index, doc_type, id, body): - return self.ES.update(index = index+'_'+doc_type, id = id, body = body) + return self.ES.update(index=index + "_" + doc_type, id=id, body=body) + def scroll(self, scroll_id, scroll): - return self.ES.scroll(scroll_id = scroll_id, scroll = scroll) + return self.ES.scroll(scroll_id=scroll_id, scroll=scroll) + def delete_by_query(self, **kwargs): return self.ES.delete_by_query(**kwargs) - def search(self, index, doc_type, size = 100, scroll = None, _source_include = None, body = None): + + def search( + self, index, doc_type, size=100, scroll=None, _source_include=None, body=None + ): return self.ES.search( - index = index+'_'+doc_type, - size = size, - scroll = scroll, - _source_includes = _source_include, - body = body - ) - def count(self, index, doc_type = '*', body = None): - return self.ES.count( - index = index+'_'+doc_type, - body = body - ) + index=index + "_" + doc_type, + size=size, + scroll=scroll, + _source_includes=_source_include, + body=body, + ) + + def count(self, index, doc_type="*", body=None): + return self.ES.count(index=index + "_" + doc_type, body=body) class KibbleDatabase(object): def __init__(self, config): self.config = config - self.dbname = config['elasticsearch']['dbname'] - self.ES = elasticsearch.Elasticsearch([{ - 'host': config['elasticsearch']['host'], - 'port': int(config['elasticsearch']['port']), - 'use_ssl': config['elasticsearch']['ssl'], - 'verify_certs': False, - 'url_prefix': config['elasticsearch']['uri'] if 'uri' in config['elasticsearch'] else '', - 'http_auth': config['elasticsearch']['auth'] if 'auth' in config['elasticsearch'] else None - }], - max_retries=5, - retry_on_timeout=True - ) + self.dbname = config["elasticsearch"]["dbname"] + self.ES = elasticsearch.Elasticsearch( + [ + { + "host": config["elasticsearch"]["host"], + "port": int(config["elasticsearch"]["port"]), + "use_ssl": config["elasticsearch"]["ssl"], + "verify_certs": False, + "url_prefix": config["elasticsearch"]["uri"] + if "uri" in config["elasticsearch"] + else "", + "http_auth": config["elasticsearch"]["auth"] + if "auth" in config["elasticsearch"] + else None, + } + ], + max_retries=5, + retry_on_timeout=True, + ) # IMPORTANT BIT: Figure out if this is ES < 6.x, 6.x or >= 7.x. # If so, we're using the new ES DB mappings, and need to adjust ALL # ES calls to match this. 
- self.ESversion = int(self.ES.info()['version']['number'].split('.')[0]) + self.ESversion = int(self.ES.info()["version"]["number"].split(".")[0]) if self.ESversion >= 7: self.ES = KibbleESWrapperSeven(self.ES) elif self.ESVersion >= 6: diff --git a/kibble/api/plugins/openapi.py b/kibble/api/plugins/openapi.py index f2894700..36242e1c 100644 --- a/kibble/api/plugins/openapi.py +++ b/kibble/api/plugins/openapi.py @@ -35,23 +35,24 @@ def __init__(self, message): # Python type names to JSON type names py2JSON = { - 'int': 'integer', - 'float': 'float', - 'str': 'string', - 'list': 'array', - 'dict': 'object', - 'bool': 'boolean' + "int": "integer", + "float": "float", + "str": "string", + "list": "array", + "dict": "object", + "bool": "boolean", } mcolors = { - 'PUT': '#fca130', - 'DELETE': '#f93e3e', - 'GET': '#61affe', - 'POST': '#49cc5c', - 'PATCH': '#d5a37e' + "PUT": "#fca130", + "DELETE": "#f93e3e", + "GET": "#61affe", + "POST": "#49cc5c", + "PATCH": "#d5a37e", } -class OpenAPI(): + +class OpenAPI: def __init__(self, APIFile): """ Instantiates an OpenAPI validator given a YAML specification""" if APIFile.endswith(".json") or APIFile.endswith(".js"): @@ -68,172 +69,218 @@ def validateType(self, field, value, ftype): # Check if type matches if ftype != jsonType: - raise OpenAPIException("OpenAPI mismatch: Field '%s' was expected to be %s, but was really %s!" % (field, ftype, jsonType)) + raise OpenAPIException( + "OpenAPI mismatch: Field '%s' was expected to be %s, but was really %s!" + % (field, ftype, jsonType) + ) - def validateSchema(self, pdef, formdata, schema = None): + def validateSchema(self, pdef, formdata, schema=None): """ Validate (sub)parameters against OpenAPI specs """ # allOf: list of schemas to validate against - if 'allOf' in pdef: - for subdef in pdef['allOf']: + if "allOf" in pdef: + for subdef in pdef["allOf"]: self.validateSchema(subdef, formdata) where = "JSON body" # Symbolic link?? - if 'schema' in pdef: - schema = pdef['schema']['$ref'] - if '$ref' in pdef: - schema = pdef['$ref'] + if "schema" in pdef: + schema = pdef["schema"]["$ref"] + if "$ref" in pdef: + schema = pdef["$ref"] if schema: # #/foo/bar/baz --> dict['foo']['bar']['baz'] - pdef = functools.reduce(operator.getitem, schema.split('/')[1:], self.API) + pdef = functools.reduce(operator.getitem, schema.split("/")[1:], self.API) where = "item matching schema %s" % schema # Check that all required fields are present - if 'required' in pdef: - for field in pdef['required']: + if "required" in pdef: + for field in pdef["required"]: if not field in formdata: - raise OpenAPIException("OpenAPI mismatch: Missing input field '%s' in %s!" % (field, where)) + raise OpenAPIException( + "OpenAPI mismatch: Missing input field '%s' in %s!" + % (field, where) + ) # Now check for valid format of input data for field in formdata: - if 'properties' not in pdef or field not in pdef['properties'] : - raise OpenAPIException("Unknown input field '%s' in %s!" % (field, where)) - if 'type' not in pdef['properties'][field]: - raise OpenAPIException("OpenAPI mismatch: Field '%s' was found in api.yaml, but no format was specified in specs!" % field) - ftype = pdef['properties'][field]['type'] + if "properties" not in pdef or field not in pdef["properties"]: + raise OpenAPIException( + "Unknown input field '%s' in %s!" % (field, where) + ) + if "type" not in pdef["properties"][field]: + raise OpenAPIException( + "OpenAPI mismatch: Field '%s' was found in api.yaml, but no format was specified in specs!" 
+ % field + ) + ftype = pdef["properties"][field]["type"] self.validateType(field, formdata[field], ftype) # Validate sub-arrays - if ftype == 'array' and 'items' in pdef['properties'][field]: + if ftype == "array" and "items" in pdef["properties"][field]: for item in formdata[field]: - if '$ref' in pdef['properties'][field]['items']: - self.validateSchema(pdef['properties'][field]['items'], item) + if "$ref" in pdef["properties"][field]["items"]: + self.validateSchema(pdef["properties"][field]["items"], item) else: - self.validateType(field, formdata[field], pdef['properties'][field]['items']['type']) + self.validateType( + field, + formdata[field], + pdef["properties"][field]["items"]["type"], + ) # Validate sub-hashes - if ftype == 'hash' and 'schema' in pdef['properties'][field]: - self.validateSchema(pdef['properties'][field], formdata[field]) + if ftype == "hash" and "schema" in pdef["properties"][field]: + self.validateSchema(pdef["properties"][field], formdata[field]) + def validateParameters(self, defs, formdata): # pass - def validate(self, method = "GET", path = "/foo", formdata = None): + def validate(self, method="GET", path="/foo", formdata=None): """ Validate the request method and input data against the OpenAPI specification """ # Make sure we're not dealing with a dynamic URL. # If we find /foo/{key}, we fold that into the form data # and process as if it's a json input field for now. - if not self.API['paths'].get(path): - for xpath in self.API['paths']: + if not self.API["paths"].get(path): + for xpath in self.API["paths"]: pathRE = re.sub(r"\{(.+?)\}", r"(?P<\1>[^/]+)", xpath) m = re.match(pathRE, path) if m: - for k,v in m.groupdict().items(): + for k, v in m.groupdict().items(): formdata[k] = v path = xpath break - if self.API['paths'].get(path): - defs = self.API['paths'].get(path) + if self.API["paths"].get(path): + defs = self.API["paths"].get(path) method = method.lower() if method in defs: mdefs = defs[method] - if formdata and 'parameters' in mdefs: - self.validateParameters(mdefs['parameters'], formdata) - elif formdata and 'requestBody' not in mdefs: - raise OpenAPIException("OpenAPI mismatch: JSON data is now allowed for this request type") - elif formdata and 'requestBody' in mdefs and 'content' in mdefs['requestBody']: + if formdata and "parameters" in mdefs: + self.validateParameters(mdefs["parameters"], formdata) + elif formdata and "requestBody" not in mdefs: + raise OpenAPIException( + "OpenAPI mismatch: JSON data is now allowed for this request type" + ) + elif ( + formdata + and "requestBody" in mdefs + and "content" in mdefs["requestBody"] + ): # SHORTCUT: We only care about JSON input for Kibble! Disregard other types - if not 'application/json' in mdefs['requestBody']['content']: - raise OpenAPIException ("OpenAPI mismatch: API endpoint accepts input, but no application/json definitions found in api.yaml!") - jdefs = mdefs['requestBody']['content']['application/json'] + if not "application/json" in mdefs["requestBody"]["content"]: + raise OpenAPIException( + "OpenAPI mismatch: API endpoint accepts input, but no application/json definitions found in api.yaml!" 
+ ) + jdefs = mdefs["requestBody"]["content"]["application/json"] # Check that required params are here self.validateSchema(jdefs, formdata) else: - raise OpenAPIException ("OpenAPI mismatch: Method %s is not registered for this API" % method) + raise OpenAPIException( + "OpenAPI mismatch: Method %s is not registered for this API" + % method + ) else: raise OpenAPIException("OpenAPI mismatch: Unknown API path '%s'!" % path) - def dumpExamples(self, pdef, array = False): + def dumpExamples(self, pdef, array=False): schema = None - if 'schema' in pdef: - if 'type' in pdef['schema'] and pdef['schema']['type'] == 'array': + if "schema" in pdef: + if "type" in pdef["schema"] and pdef["schema"]["type"] == "array": array = True - schema = pdef['schema']['items']['$ref'] + schema = pdef["schema"]["items"]["$ref"] else: - schema = pdef['schema']['$ref'] - if '$ref' in pdef: - schema = pdef['$ref'] + schema = pdef["schema"]["$ref"] + if "$ref" in pdef: + schema = pdef["$ref"] if schema: # #/foo/bar/baz --> dict['foo']['bar']['baz'] - pdef = functools.reduce(operator.getitem, schema.split('/')[1:], self.API) + pdef = functools.reduce(operator.getitem, schema.split("/")[1:], self.API) js = {} desc = {} - if 'properties' in pdef: - for k, v in pdef['properties'].items(): - if 'description' in v: - desc[k] = [v['type'], v['description']] - if 'example' in v: - js[k] = v['example'] - elif 'items' in v: - if v['type'] == 'array': - js[k], foo = self.dumpExamples(v['items'], True) + if "properties" in pdef: + for k, v in pdef["properties"].items(): + if "description" in v: + desc[k] = [v["type"], v["description"]] + if "example" in v: + js[k] = v["example"] + elif "items" in v: + if v["type"] == "array": + js[k], foo = self.dumpExamples(v["items"], True) else: - js[k], foo = self.dumpExamples(v['items']) + js[k], foo = self.dumpExamples(v["items"]) return [js if not array else [js], desc] def toHTML(self): """ Blurps out the specs in a pretty HTML blob """ - print(""" + print( + """ -""") +""" + ) li = "

Overview:

    " - for path, spec in sorted(self.API['paths'].items()): + for path, spec in sorted(self.API["paths"].items()): for method, mspec in sorted(spec.items()): method = method.upper() - summary = mspec.get('summary', 'No summary available') - linkname = "%s%s" % (method.lower(), path.replace('/', '-')) - li += "
  • %s %s: %s
  • \n" % (linkname, method, path, summary) + summary = mspec.get("summary", "No summary available") + linkname = "%s%s" % (method.lower(), path.replace("/", "-")) + li += "
  • %s %s: %s
  • \n" % ( + linkname, + method, + path, + summary, + ) li += "
" print(li) - for path, spec in sorted(self.API['paths'].items()): + for path, spec in sorted(self.API["paths"].items()): for method, mspec in sorted(spec.items()): method = method.upper() - summary = mspec.get('summary', 'No summary available') + summary = mspec.get("summary", "No summary available") resp = "" inp = "" inpvars = "" - linkname = "%s%s" % (method.lower(), path.replace('/', '-')) - if 'responses' in mspec: - for code, cresp in sorted(mspec['responses'].items()): - for ctype, pdef in cresp['content'].items(): + linkname = "%s%s" % (method.lower(), path.replace("/", "-")) + if "responses" in mspec: + for code, cresp in sorted(mspec["responses"].items()): + for ctype, pdef in cresp["content"].items(): xjs, desc = self.dumpExamples(pdef) - js = json.dumps(xjs, indent = 4) - resp += "
%s:\n%s
\n
\n" % (code, js) - - if 'requestBody' in mspec: - for ctype, pdef in mspec['requestBody']['content'].items(): + js = json.dumps(xjs, indent=4) + resp += ( + "
%s:\n%s
\n
\n" + % (code, js) + ) + + if "requestBody" in mspec: + for ctype, pdef in mspec["requestBody"]["content"].items(): xjs, desc = self.dumpExamples(pdef) if desc: for k, v in desc.items(): - inpvars += "%s: (%s) %s
\n" % (k, v[0], v[1]) - js = json.dumps(xjs, indent = 4) - inp += "

Input examples:

%s:\n%s
\n
" % (ctype, js) + inpvars += ( + "%s: (%s) %s
\n" + % (k, v[0], v[1]) + ) + js = json.dumps(xjs, indent=4) + inp += ( + "

Input examples:

%s:\n%s
\n
" + % (ctype, js) + ) if inpvars: - inpvars = "
%s
\n
" % inpvars - + inpvars = ( + "
%s
\n
" + % inpvars + ) - print(""" + print( + """
@@ -256,6 +303,20 @@ def toHTML(self):
- """ % (linkname, mcolors[method], mcolors[method], mcolors[method], method, path, summary, "block" if inp else "none", inpvars, inp, resp)) - #print("%s %s: %s" % (method.upper(), path, mspec['summary'])) + """ + % ( + linkname, + mcolors[method], + mcolors[method], + mcolors[method], + method, + path, + summary, + "block" if inp else "none", + inpvars, + inp, + resp, + ) + ) + # print("%s %s: %s" % (method.upper(), path, mspec['summary'])) print("") diff --git a/kibble/api/plugins/session.py b/kibble/api/plugins/session.py index 6c212f51..035fb76e 100644 --- a/kibble/api/plugins/session.py +++ b/kibble/api/plugins/session.py @@ -27,102 +27,89 @@ class KibbleSession(object): - def getView(self, viewID): - if self.DB.ES.exists(index=self.DB.dbname, doc_type="view", id = viewID): - view = self.DB.ES.get(index=self.DB.dbname, doc_type="view", id = viewID) - return view['_source']['sourceList'] + if self.DB.ES.exists(index=self.DB.dbname, doc_type="view", id=viewID): + view = self.DB.ES.get(index=self.DB.dbname, doc_type="view", id=viewID) + return view["_source"]["sourceList"] return [] - def subFilter(self, subfilter, view = []): + def subFilter(self, subfilter, view=[]): if len(subfilter) == 0: return view - dOrg = self.user['defaultOrganisation'] or "apache" + dOrg = self.user["defaultOrganisation"] or "apache" res = self.DB.ES.search( - index=self.DB.dbname, - doc_type="source", - size = 10000, - _source_include = ['sourceURL', 'sourceID'], - body = { - 'query': { - 'bool': { - 'must': [ - {'term': { - 'organisation': dOrg - } - }] - } - - } - } - ) + index=self.DB.dbname, + doc_type="source", + size=10000, + _source_include=["sourceURL", "sourceID"], + body={"query": {"bool": {"must": [{"term": {"organisation": dOrg}}]}}}, + ) sources = [] - for doc in res['hits']['hits']: - sid = doc['_source']['sourceID'] - m = re.search(subfilter, doc['_source']['sourceURL'], re.IGNORECASE) + for doc in res["hits"]["hits"]: + sid = doc["_source"]["sourceID"] + m = re.search(subfilter, doc["_source"]["sourceURL"], re.IGNORECASE) if m and ((not view) or (sid in view)): sources.append(sid) if not sources: - sources = ['x'] # blank return to not show eeeeverything + sources = ["x"] # blank return to not show eeeeverything return sources - def subType(self, stype, view = []): + def subType(self, stype, view=[]): if len(stype) == 0: return view if type(stype) is str: stype = [stype] - dOrg = self.user['defaultOrganisation'] or "apache" + dOrg = self.user["defaultOrganisation"] or "apache" res = self.DB.ES.search( - index=self.DB.dbname, - doc_type="source", - size = 10000, - _source_include = ['sourceURL', 'sourceID', 'type'], - body = { - 'query': { - 'bool': { - 'must': [ - {'term': { - 'organisation': dOrg - } - }, - {'terms': { - 'type': stype - } - } - ] - } - + index=self.DB.dbname, + doc_type="source", + size=10000, + _source_include=["sourceURL", "sourceID", "type"], + body={ + "query": { + "bool": { + "must": [ + {"term": {"organisation": dOrg}}, + {"terms": {"type": stype}}, + ] } } - ) + }, + ) sources = [] - for doc in res['hits']['hits']: - sid = doc['_source']['sourceID'] - m = doc['_source']['type'] in stype + for doc in res["hits"]["hits"]: + sid = doc["_source"]["sourceID"] + m = doc["_source"]["type"] in stype if m and ((not view) or (sid in view)): sources.append(sid) if not sources: - sources = ['x'] # blank return to not show eeeeverything + sources = ["x"] # blank return to not show eeeeverything return sources def logout(self): """Log out user and wipe cookie""" if self.user and 
self.cookie: cookies = http.cookies.SimpleCookie() - cookies['kibble_session'] = "null" - self.headers.append(('Set-Cookie', cookies['kibble_session'].OutputString())) + cookies["kibble_session"] = "null" + self.headers.append( + ("Set-Cookie", cookies["kibble_session"].OutputString()) + ) try: - self.DB.ES.delete(index=self.DB.dbname, doc_type='uisession', id = self.cookie) + self.DB.ES.delete( + index=self.DB.dbname, doc_type="uisession", id=self.cookie + ) self.cookie = None self.user = None except: pass + def newCookie(self): cookie = uuid.uuid4() cookies = http.cookies.SimpleCookie() - cookies['kibble_session'] = cookie - cookies['kibble_session']['expires'] = 86400 * 365 # Expire one year from now - self.headers.append(('Set-Cookie', cookies['kibble_session'].OutputString())) + cookies["kibble_session"] = cookie + cookies["kibble_session"]["expires"] = 86400 * 365 # Expire one year from now + self.headers.append(("Set-Cookie", cookies["kibble_session"].OutputString())) + def __init__(self, DB, environ, config): """ Loads the current user session or initiates a new session if @@ -131,47 +118,75 @@ def __init__(self, DB, environ, config): self.config = config self.user = None self.DB = DB - self.headers = [('Content-Type', 'application/json; charset=utf-8')] + self.headers = [("Content-Type", "application/json; charset=utf-8")] self.cookie = None # Construct the URL we're visiting - self.url = "%s://%s" % (environ['wsgi.url_scheme'], environ.get('HTTP_HOST', environ.get('SERVER_NAME'))) - self.url += environ.get('SCRIPT_NAME', '/') + self.url = "%s://%s" % ( + environ["wsgi.url_scheme"], + environ.get("HTTP_HOST", environ.get("SERVER_NAME")), + ) + self.url += environ.get("SCRIPT_NAME", "/") # Get Kibble cookie cookie = None cookies = None - if 'HTTP_KIBBLE_TOKEN' in environ: - token = environ.get('HTTP_KIBBLE_TOKEN') - if re.match(r"^[-a-f0-9]+$", token): # Validate token, must follow UUID4 specs - res = self.DB.ES.search(index=self.DB.dbname, doc_type='useraccount', body = {"query": { "match": { "token": token}}}) - if res['hits']['hits']: - self.user = res['hits']['hits'][0]['_source'] + if "HTTP_KIBBLE_TOKEN" in environ: + token = environ.get("HTTP_KIBBLE_TOKEN") + if re.match( + r"^[-a-f0-9]+$", token + ): # Validate token, must follow UUID4 specs + res = self.DB.ES.search( + index=self.DB.dbname, + doc_type="useraccount", + body={"query": {"match": {"token": token}}}, + ) + if res["hits"]["hits"]: + self.user = res["hits"]["hits"][0]["_source"] self.newCookie() else: - if 'HTTP_COOKIE' in environ: - cookies = http.cookies.SimpleCookie(environ['HTTP_COOKIE']) - if cookies and 'kibble_session' in cookies: - cookie = cookies['kibble_session'].value + if "HTTP_COOKIE" in environ: + cookies = http.cookies.SimpleCookie(environ["HTTP_COOKIE"]) + if cookies and "kibble_session" in cookies: + cookie = cookies["kibble_session"].value try: - if re.match(r"^[-a-f0-9]+$", cookie): # Validate cookie, must follow UUID4 specs + if re.match( + r"^[-a-f0-9]+$", cookie + ): # Validate cookie, must follow UUID4 specs doc = None - sdoc = self.DB.ES.get(index=self.DB.dbname, doc_type='uisession', id = cookie) - if sdoc and 'cid' in sdoc['_source']: - doc = self.DB.ES.get(index=self.DB.dbname, doc_type='useraccount', id = sdoc['_source']['cid']) - if doc and '_source' in doc: + sdoc = self.DB.ES.get( + index=self.DB.dbname, doc_type="uisession", id=cookie + ) + if sdoc and "cid" in sdoc["_source"]: + doc = self.DB.ES.get( + index=self.DB.dbname, + doc_type="useraccount", + id=sdoc["_source"]["cid"], + 
) + if doc and "_source" in doc: # Make sure this cookie has been used in the past 7 days, else nullify it. # Further more, run an update of the session if >1 hour ago since last update. - age = time.time() - sdoc['_source']['timestamp'] - if age > (7*86400): - self.DB.ES.delete(index=self.DB.dbname, doc_type='uisession', id = cookie) - sdoc['_source'] = None # Wipe it! + age = time.time() - sdoc["_source"]["timestamp"] + if age > (7 * 86400): + self.DB.ES.delete( + index=self.DB.dbname, + doc_type="uisession", + id=cookie, + ) + sdoc["_source"] = None # Wipe it! doc = None elif age > 3600: - sdoc['_source']['timestamp'] = int(time.time()) # Update timestamp in session DB - self.DB.ES.update(index=self.DB.dbname, doc_type='uisession', id = cookie, body = {'doc':sdoc['_source']}) + sdoc["_source"]["timestamp"] = int( + time.time() + ) # Update timestamp in session DB + self.DB.ES.update( + index=self.DB.dbname, + doc_type="uisession", + id=cookie, + body={"doc": sdoc["_source"]}, + ) if doc: - self.user = doc['_source'] + self.user = doc["_source"] else: cookie = None except Exception as err: diff --git a/kibble/api/yaml/openapi/combine.py b/kibble/api/yaml/openapi/combine.py index 962021ca..1418f591 100644 --- a/kibble/api/yaml/openapi/combine.py +++ b/kibble/api/yaml/openapi/combine.py @@ -42,7 +42,7 @@ def deconstruct(): yml = yaml.load(open(bpath + "/../openapi.yaml")) noDefs = 0 print("Dumping paths into pages...") - for endpoint, defs in yml['paths'].items(): + for endpoint, defs in yml["paths"].items(): noDefs += 1 xendpoint = endpoint.replace("/api/", "") ypath = os.path.abspath("%s/../../pages/%s.py" % (bpath, xendpoint)) @@ -50,40 +50,53 @@ def deconstruct(): if os.path.isfile(ypath): print("Editing %s" % ypath) contents = open(ypath, "r").read() - contents = re.sub(r"^([#\n](?!\s*\"\"\")[^\r\n]*\n?)+", "", contents, re.MULTILINE) + contents = re.sub( + r"^([#\n](?!\s*\"\"\")[^\r\n]*\n?)+", "", contents, re.MULTILINE + ) odefs = yaml.dump(defs, default_flow_style=False) odefs = "\n".join(["# %s" % line for line in odefs.split("\n")]) with open(ypath, "w") as f: - f.write("########################################################################\n") + f.write( + "########################################################################\n" + ) f.write("# OPENAPI-URI: %s\n" % endpoint) - f.write("########################################################################\n") + f.write( + "########################################################################\n" + ) f.write(odefs) - f.write("\n########################################################################\n") + f.write( + "\n########################################################################\n" + ) f.write("\n\n") f.write(contents) f.close() print("Dumping security components...") - for basetype, bdefs in yml['components'].items(): + for basetype, bdefs in yml["components"].items(): for schema, defs in bdefs.items(): noDefs += 1 ypath = "%s/components/%s/%s.yaml" % (bpath, basetype, schema) ydir = os.path.dirname(ypath) if not os.path.isdir(ydir): print("Making directory %s" % ydir) - os.makedirs(ydir, exist_ok = True) + os.makedirs(ydir, exist_ok=True) with open(ypath, "w") as f: - f.write("########################################################################\n") - f.write("# %-68s #\n" % defs.get('summary', schema)) - f.write("########################################################################\n") + f.write( + "########################################################################\n" + ) + f.write("# %-68s #\n" 
% defs.get("summary", schema)) + f.write( + "########################################################################\n" + ) f.write(yaml.dump(defs, default_flow_style=False)) f.close() print("Dumped %u definitions." % noDefs) + def construct(): yml = {} - yml['paths'] = {} - yml['components'] = {} + yml["paths"] = {} + yml["components"] = {} apidir = os.path.abspath("%s/../../pages/" % bpath) print("Scanning %s" % apidir) for d in os.listdir(apidir): @@ -102,7 +115,7 @@ def construct(): print("Weaving in API path %s" % apath) cyml = "\n".join([line[2:] for line in cyml.split("\n")]) defs = yaml.load(cyml) - yml['paths'][apath] = defs + yml["paths"][apath] = defs else: fname = d if fname.endswith(".py"): @@ -117,7 +130,7 @@ def construct(): print("Weaving in API path %s" % apath) cyml = "\n".join([line[2:] for line in cyml.split("\n")]) defs = yaml.load(cyml) - yml['paths'][apath] = defs + yml["paths"][apath] = defs apidir = os.path.abspath("%s/components" % bpath) print("Scanning %s" % apidir) for d in os.listdir(apidir): @@ -126,11 +139,11 @@ def construct(): print("Scanning %s" % cdir) for fname in os.listdir(cdir): if fname.endswith(".yaml"): - yml['components'][d] = yml['components'].get(d, {}) + yml["components"][d] = yml["components"].get(d, {}) fpath = "%s/%s" % (cdir, fname) print("Scanning %s" % fpath) defs = yaml.load(open(fpath)) - yml['components'][d][fname.replace(".yaml", "")] = defs + yml["components"][d][fname.replace(".yaml", "")] = defs ypath = os.path.join(YAML_DIRECTORY, "openapi.yaml") with open(ypath, "w") as f: f.write(baseyaml) @@ -138,7 +151,8 @@ def construct(): f.close() print("All done!") -if len(sys.argv) > 1 and sys.argv[1] == 'deconstruct': + +if len(sys.argv) > 1 and sys.argv[1] == "deconstruct": deconstruct() else: construct() diff --git a/kibble/settings.py b/kibble/settings.py index db2c1bc8..243c3011 100644 --- a/kibble/settings.py +++ b/kibble/settings.py @@ -18,8 +18,6 @@ import os YAML_DIRECTORY = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - "api", - "yaml", + os.path.dirname(os.path.realpath(__file__)), "api", "yaml" ) KIBBLE_YAML = os.path.join(YAML_DIRECTORY, "kibble.yaml") diff --git a/kibble/setup/makeaccount.py b/kibble/setup/makeaccount.py index 1f169d37..117e928b 100644 --- a/kibble/setup/makeaccount.py +++ b/kibble/setup/makeaccount.py @@ -27,26 +27,46 @@ class KibbleDatabase(object): def __init__(self, config): self.config = config - self.dbname = config['elasticsearch']['dbname'] - self.ES = elasticsearch.Elasticsearch([{ - 'host': config['elasticsearch']['host'], - 'port': int(config['elasticsearch']['port']), - 'use_ssl': config['elasticsearch']['ssl'], - 'verify_certs': False, - 'url_prefix': config['elasticsearch']['uri'] if 'uri' in config['elasticsearch'] else '', - 'http_auth': config['elasticsearch']['auth'] if 'auth' in config['elasticsearch'] else None - }], - max_retries=5, - retry_on_timeout=True - ) + self.dbname = config["elasticsearch"]["dbname"] + self.ES = elasticsearch.Elasticsearch( + [ + { + "host": config["elasticsearch"]["host"], + "port": int(config["elasticsearch"]["port"]), + "use_ssl": config["elasticsearch"]["ssl"], + "verify_certs": False, + "url_prefix": config["elasticsearch"]["uri"] + if "uri" in config["elasticsearch"] + else "", + "http_auth": config["elasticsearch"]["auth"] + if "auth" in config["elasticsearch"] + else None, + } + ], + max_retries=5, + retry_on_timeout=True, + ) arg_parser = argparse.ArgumentParser() -arg_parser.add_argument("-u", "--username", required=True, 
help="Username (email) of accoun to create") -arg_parser.add_argument("-p", "--password", required=True, help="Password to set for account") -arg_parser.add_argument("-n", "--name", help="Real name (displayname) of account (optional)") -arg_parser.add_argument("-A", "--admin", action="store_true", help="Make account global admin") -arg_parser.add_argument("-a", "--orgadmin", action="store_true", help="Make account owner of orgs invited to") +arg_parser.add_argument( + "-u", "--username", required=True, help="Username (email) of accoun to create" +) +arg_parser.add_argument( + "-p", "--password", required=True, help="Password to set for account" +) +arg_parser.add_argument( + "-n", "--name", help="Real name (displayname) of account (optional)" +) +arg_parser.add_argument( + "-A", "--admin", action="store_true", help="Make account global admin" +) +arg_parser.add_argument( + "-a", + "--orgadmin", + action="store_true", + help="Make account owner of orgs invited to", +) arg_parser.add_argument("-o", "--org", help="Invite to this organisation") args = arg_parser.parse_args() @@ -66,16 +86,16 @@ def __init__(self, config): aorgs = orgs if adminorg else [] salt = bcrypt.gensalt() -pwd = bcrypt.hashpw(password.encode('utf-8'), salt).decode('ascii') +pwd = bcrypt.hashpw(password.encode("utf-8"), salt).decode("ascii") doc = { - 'email': username, # Username (email) - 'password': pwd, # Hashed password - 'displayName': username, # Display Name - 'organisations': orgs, # Orgs user belongs to (default is none) - 'ownerships': aorgs, # Orgs user owns (default is none) - 'defaultOrganisation': None, # Default org for user - 'verified': True, # Account verified via email? - 'userlevel': "admin" if admin else "user" # User level (user/admin) - } -DB.ES.index(index=DB.dbname, doc_type='useraccount', id = username, body = doc) + "email": username, # Username (email) + "password": pwd, # Hashed password + "displayName": username, # Display Name + "organisations": orgs, # Orgs user belongs to (default is none) + "ownerships": aorgs, # Orgs user owns (default is none) + "defaultOrganisation": None, # Default org for user + "verified": True, # Account verified via email? + "userlevel": "admin" if admin else "user", # User level (user/admin) +} +DB.ES.index(index=DB.dbname, doc_type="useraccount", id=username, body=doc) print("Account created!") diff --git a/kibble/setup/setup.py b/kibble/setup/setup.py index c204457b..bded26d9 100644 --- a/kibble/setup/setup.py +++ b/kibble/setup/setup.py @@ -1,5 +1,3 @@ - - # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -31,7 +29,7 @@ from kibble.settings import KIBBLE_YAML -KIBBLE_VERSION = '0.1.0' # ABI/API compat demarcation. +KIBBLE_VERSION = "0.1.0" # ABI/API compat demarcation. KIBBLE_DB_VERSION = 2 # Second database revision if sys.version_info <= (3, 3): @@ -43,38 +41,54 @@ def get_parser(): arg_parser = argparse.ArgumentParser() arg_parser.add_argument( - "-e", "--hostname", + "-e", + "--hostname", help="Pre-defined hostname for ElasticSearch (docker setups). Default: localhost", - default="localhost" + default="localhost", ) arg_parser.add_argument( - "-p", "--port", - help="Pre-defined port for ES (docker setups). Default: 9200", default=9200 + "-p", + "--port", + help="Pre-defined port for ES (docker setups). 
Default: 9200", + default=9200, ) arg_parser.add_argument( - "-d", "--dbname", help="Pre-defined Database prefix (docker setups). Default: kibble", default="kibble" + "-d", + "--dbname", + help="Pre-defined Database prefix (docker setups). Default: kibble", + default="kibble", ) arg_parser.add_argument( - "-s", "--shards", help="Predefined number of ES shards (docker setups), Default: 5", default=5 + "-s", + "--shards", + help="Predefined number of ES shards (docker setups), Default: 5", + default=5, ) arg_parser.add_argument( - "-r", "--replicas", help="Predefined number of replicas for ES (docker setups). Default: 1", default=1 + "-r", + "--replicas", + help="Predefined number of replicas for ES (docker setups). Default: 1", + default=1, ) arg_parser.add_argument( - "-m", "--mailhost", + "-m", + "--mailhost", help="Pre-defined mail server host (docker setups). Default: localhost:25", - default="localhost:25" + default="localhost:25", ) arg_parser.add_argument( - "-a", "--autoadmin", - action='store_true', + "-a", + "--autoadmin", + action="store_true", help="Generate generic admin account (docker setups). Default: False", - default=False + default=False, ) arg_parser.add_argument( - "-k", "--skiponexist", - action='store_true', - help="Skip DB creation if DBs exist (docker setups). Defaul: True", default=True + "-k", + "--skiponexist", + action="store_true", + help="Skip DB creation if DBs exist (docker setups). Defaul: True", + default=True, ) return arg_parser @@ -94,24 +108,21 @@ def create_es_index( # elasticsearch logs lots of warnings on retries/connection failure logging.getLogger("elasticsearch").setLevel(logging.ERROR) - mappings_json = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mappings.json") + mappings_json = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "mappings.json" + ) with open(mappings_json, "r") as f: mappings = json.load(f) - es = Elasticsearch([ - { - 'host': hostname, - 'port': port, - 'use_ssl': False, - 'url_prefix': '' - }], + es = Elasticsearch( + [{"host": hostname, "port": port, "use_ssl": False, "url_prefix": ""}], max_retries=5, - retry_on_timeout=True + retry_on_timeout=True, ) - es_version =es.info()['version']['number'] - es6 = int(es_version.split('.')[0]) >= 6 - es7 = int(es_version.split('.')[0]) >= 7 + es_version = es.info()["version"]["number"] + es6 = int(es_version.split(".")[0]) >= 6 + es7 = int(es_version.split(".")[0]) >= 7 if not es6: print( @@ -122,7 +133,7 @@ def create_es_index( # If ES >= 7, _doc is invalid and mapping should be rooted if es7: - mappings['mappings'] = mappings['mappings']['_doc'] + mappings["mappings"] = mappings["mappings"]["_doc"] # Check if index already exists if es.indices.exists(dbname + "_api"): @@ -134,88 +145,81 @@ def create_es_index( sys.exit(-1) types = [ - 'api', + "api", # ci_*: CI service stats - 'ci_build', - 'ci_queue', + "ci_build", + "ci_queue", # code_* + evolution + file_history: git repo stats - 'code_commit', - 'code_commit_unique', - 'code_modification', - 'evolution', - 'file_history', + "code_commit", + "code_commit_unique", + "code_modification", + "evolution", + "file_history", # forum_*: forum stats (SO, Discourse, Askbot etc) - 'forum_post', - 'forum_topic', + "forum_post", + "forum_topic", # GitHub stats - 'ghstats', + "ghstats", # im_*: Instant messaging stats - 'im_stats', - 'im_ops', - 'im_msg', - 'issue', - 'logstats', + "im_stats", + "im_ops", + "im_msg", + "issue", + "logstats", # email, mail*: Email statitics - 'email', - 'mailstats', - 'mailtop', + 
"email", + "mailstats", + "mailtop", # organisation, view, source, publish: UI Org DB - 'organisation', - 'view', - 'publish', - 'source', + "organisation", + "view", + "publish", + "source", # stats: Miscellaneous stats - 'stats', + "stats", # social_*: Twitter, Mastodon, Facebook etc - 'social_follow', - 'social_followers', - 'social_follower', - 'social_person', + "social_follow", + "social_followers", + "social_follower", + "social_person", # uisession, useraccount, message: UI user DB - 'uisession', - 'useraccount', - 'message', + "uisession", + "useraccount", + "message", # person: contributor DB - 'person', + "person", ] for t in types: iname = f"{dbname}_{t}" print(f"Creating index {iname}") - settings = { - "number_of_shards": shards, - "number_of_replicas": replicas - } + settings = {"number_of_shards": shards, "number_of_replicas": replicas} es.indices.create( - index=iname, - body={ - "mappings": mappings['mappings'], - "settings": settings - } + index=iname, body={"mappings": mappings["mappings"], "settings": settings} ) print(f"Indices created!") print() salt = bcrypt.gensalt() - pwd = bcrypt.hashpw(admin_pass.encode('utf-8'), salt).decode('ascii') + pwd = bcrypt.hashpw(admin_pass.encode("utf-8"), salt).decode("ascii") print("Creating administrator account") doc = { - 'email': admin_name, # Username (email) - 'password': pwd, # Hashed password - 'displayName': "Administrator", # Display Name - 'organisations': [], # Orgs user belongs to (default is none) - 'ownerships': [], # Orgs user owns (default is none) - 'defaultOrganisation': None, # Default org for user - 'verified': True, # Account verified via email? - 'userlevel': "admin" # User level (user/admin) - } + "email": admin_name, # Username (email) + "password": pwd, # Hashed password + "displayName": "Administrator", # Display Name + "organisations": [], # Orgs user belongs to (default is none) + "ownerships": [], # Orgs user owns (default is none) + "defaultOrganisation": None, # Default org for user + "verified": True, # Account verified via email? + "userlevel": "admin", # User level (user/admin) + } dbdoc = { - 'apiversion': KIBBLE_VERSION, # Log current API version - 'dbversion': KIBBLE_DB_VERSION # Log the database revision we accept (might change!) + "apiversion": KIBBLE_VERSION, # Log current API version + "dbversion": KIBBLE_DB_VERSION, # Log the database revision we accept (might change!) 
} - es.index(index=dbname+'_useraccount', doc_type='_doc', id=admin_name, body=doc) - es.index(index=dbname+'_api', doc_type='_doc', id='current', body=dbdoc) + es.index(index=dbname + "_useraccount", doc_type="_doc", id=admin_name, body=doc) + es.index(index=dbname + "_api", doc_type="_doc", id="current", body=dbdoc) print("Account created!") @@ -228,48 +232,39 @@ def get_kibble_yaml() -> str: return kibble_yaml -def save_config( - mlserver: str, - hostname: str, - port: int, - dbname: str, -): +def save_config(mlserver: str, hostname: str, port: int, dbname: str): """Save kibble config to yaml file""" if ":" in mlserver: try: mailhost, mailport = mlserver.split(":") except ValueError: - raise ValueError("mailhost argument must be in form of `host:port` or `host`") + raise ValueError( + "mailhost argument must be in form of `host:port` or `host`" + ) else: mailhost = mlserver mailport = 25 config = { - 'api': { - 'version': KIBBLE_VERSION, - 'database': KIBBLE_DB_VERSION - }, - 'elasticsearch': { - 'host': hostname, - 'port': port, - 'ssl': False, - 'dbname': dbname + "api": {"version": KIBBLE_VERSION, "database": KIBBLE_DB_VERSION}, + "elasticsearch": { + "host": hostname, + "port": port, + "ssl": False, + "dbname": dbname, }, - 'mail': { - 'mailhost': mailhost, - 'mailport': int(mailport), - 'sender': 'Kibble ' + "mail": { + "mailhost": mailhost, + "mailport": int(mailport), + "sender": "Kibble ", }, - 'accounts': { - 'allowSignup': True, - 'verify': True - } + "accounts": {"allowSignup": True, "verify": True}, } kibble_yaml = get_kibble_yaml() print(f"Writing Kibble config to {kibble_yaml}") with open(kibble_yaml, "w") as f: - f.write(yaml.dump(config, default_flow_style = False)) + f.write(yaml.dump(config, default_flow_style=False)) f.close() @@ -281,7 +276,9 @@ def get_user_input(msg: str, secure: bool = False): def print_configuration(args): - print("Configuring Apache Kibble elasticsearch instance with the following arguments:") + print( + "Configuring Apache Kibble elasticsearch instance with the following arguments:" + ) print(f"- hostname: {args.hostname}") print(f"- port: {int(args.port)}") print(f"- dbname: {args.dbname}") @@ -305,17 +302,21 @@ def main(): admin_name = "admin@kibble" admin_pass = "kibbleAdmin" if not args.autoadmin: - admin_name = get_user_input("Enter an email address for the administrator account:") - admin_pass = get_user_input("Enter a password for the administrator account:", secure=True) + admin_name = get_user_input( + "Enter an email address for the administrator account:" + ) + admin_pass = get_user_input( + "Enter a password for the administrator account:", secure=True + ) # Create Elasticsearch index # Retry in case ES is not yet up print(f"Elasticsearch: {args.hostname}:{args.port}") for attempt in tenacity.Retrying( - retry=tenacity.retry_if_exception_type(exception_types=Exception), - wait=tenacity.wait_fixed(10), - stop=tenacity.stop_after_attempt(10), - reraise=True + retry=tenacity.retry_if_exception_type(exception_types=Exception), + wait=tenacity.wait_fixed(10), + stop=tenacity.stop_after_attempt(10), + reraise=True, ): with attempt: print("Trying to create ES index...") @@ -342,5 +343,5 @@ def main(): print("All done, Kibble should...work now :)") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/setup.py b/setup.py index 4208d68e..abfe996f 100644 --- a/setup.py +++ b/setup.py @@ -21,14 +21,14 @@ from setuptools import find_packages, setup # Kept manually in sync with kibble.version -spec = 
util.spec_from_file_location("kibble.version", os.path.join('kibble', 'version.py')) # noqa +spec = util.spec_from_file_location( + "kibble.version", os.path.join("kibble", "version.py") +) # noqa mod = util.module_from_spec(spec) spec.loader.exec_module(mod) # type: ignore version = mod.version # type: ignore -DEVEL_REQUIREMENTS = [ - "pre-commit==2.7.1", -] +DEVEL_REQUIREMENTS = ["pre-commit==2.7.1", "black==20.8b1"] INSTALL_REQUIREMENTS = [ "bcrypt==3.2.0", @@ -40,15 +40,16 @@ "tenacity==6.2.0", ] -EXTRAS_REQUIREMENTS = { - "devel": DEVEL_REQUIREMENTS -} +EXTRAS_REQUIREMENTS = {"devel": DEVEL_REQUIREMENTS} def get_long_description(): description = "" try: - with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'README.md'), encoding='utf-8') as f: + with open( + os.path.join(os.path.dirname(os.path.realpath(__file__)), "README.md"), + encoding="utf-8", + ) as f: description = f.read() except FileNotFoundError: pass @@ -58,51 +59,39 @@ def get_long_description(): def do_setup(): """Perform the Kibble package setup.""" setup( - name='apache-kibble', + name="apache-kibble", description="Apache Kibble is a tool to collect, aggregate and visualize data about any software project.", long_description=get_long_description(), - long_description_content_type='text/markdown', - license='Apache License 2.0', + long_description_content_type="text/markdown", + license="Apache License 2.0", version=version, - packages=find_packages(include=['kibble*']), - package_data={ - 'kibble': ['py.typed'], - 'kibble.api.yaml': ['*.yaml'], - }, + packages=find_packages(include=["kibble*"]), + package_data={"kibble": ["py.typed"], "kibble.api.yaml": ["*.yaml"]}, include_package_data=True, zip_safe=False, - entry_points={ - "console_scripts": [ - "kibble = kibble.__main__:main", - ], - }, + entry_points={"console_scripts": ["kibble = kibble.__main__:main"]}, install_requires=INSTALL_REQUIREMENTS, - setup_requires=[ - 'docutils', - 'gitpython', - 'setuptools', - 'wheel', - ], + setup_requires=["docutils", "gitpython", "setuptools", "wheel"], extras_require=EXTRAS_REQUIREMENTS, classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Environment :: Console', - 'Environment :: Web Environment', - 'Intended Audience :: Developers', - 'Intended Audience :: System Administrators', - 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: Python :: 3.8', + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Environment :: Web Environment", + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3.8", ], - author='Apache Software Foundation', - author_email='dev@kibble.apache.org', - url='http://kibble.apache.org/', - download_url=f'https://archive.apache.org/dist/kibble/{version}', - test_suite='setup.kibble_test_suite', - python_requires='~=3.8', + author="Apache Software Foundation", + author_email="dev@kibble.apache.org", + url="http://kibble.apache.org/", + download_url=f"https://archive.apache.org/dist/kibble/{version}", + test_suite="setup.kibble_test_suite", + python_requires="~=3.8", project_urls={ - 'Documentation': 'https://kibble.apache.org/docs/', - 'Bug Tracker': 'https://github.com/apache/kibble/issues', - 'Source Code': 'https://github.com/apache/kibble', + "Documentation": "https://kibble.apache.org/docs/", + "Bug Tracker": "https://github.com/apache/kibble/issues", + "Source Code": 
"https://github.com/apache/kibble", }, ) From 17e67344146233f861e55028a0fa4fa2071d37f7 Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Tue, 27 Oct 2020 17:03:23 +0100 Subject: [PATCH 12/48] Add kibana dashboard to dokcer-compose (#75) --- .github/labeler.yml | 2 +- docker-compose-dev.yaml | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.github/labeler.yml b/.github/labeler.yml index ee69a607..fff54b4c 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -9,7 +9,7 @@ area:docs: - '*.md' area:dev: - - '.github/' + - '.github/*' - '.pre-commit.config.yaml' - 'asf.yaml' - 'Dockerfile*' diff --git a/docker-compose-dev.yaml b/docker-compose-dev.yaml index be6f1363..5aaff836 100644 --- a/docker-compose-dev.yaml +++ b/docker-compose-dev.yaml @@ -41,8 +41,8 @@ services: elasticsearch: image: elasticsearch:7.9.2 ports: - - "9200:9200" - - "9300:9300" + - 9200:9200 + - 9300:9300 environment: node.name: es01 discovery.seed_hosts: es02 @@ -57,6 +57,14 @@ services: soft: -1 hard: -1 + # Kibana to view and manage Elasticsearch + kibana: + image: kibana:7.9.3 + ports: + - 5601:5601 + depends_on: + - elasticsearch + volumes: # named volumes can be managed easier using docker-compose kibble-es-data: From 2abfcc871dd35ddc727317267a4595f8230b53eb Mon Sep 17 00:00:00 2001 From: Sharvil Kekre Date: Tue, 27 Oct 2020 09:50:56 -0700 Subject: [PATCH 13/48] Add KibbleConfigParser (#74) --- kibble/configuration.py | 35 +++++++++++++++++++++++++++++++++++ kibble/setup/kibble.ini | 16 ++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 kibble/configuration.py create mode 100644 kibble/setup/kibble.ini diff --git a/kibble/configuration.py b/kibble/configuration.py new file mode 100644 index 00000000..9f8ca990 --- /dev/null +++ b/kibble/configuration.py @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from configparser import ConfigParser + + +class KibbleConfigParser(ConfigParser): + def __init__(self): + super().__init__() + + def get_int(self, section: str, key: str) -> int: + try: + return int(self.get(section, key)) + except Exception: + raise TypeError("Unable to convert value to int") + + def get_bool(self, section: str, key: str) -> bool: + try: + return bool(self.get(section, key)) + except Exception: + raise TypeError("Unable to convert value to bool") diff --git a/kibble/setup/kibble.ini b/kibble/setup/kibble.ini new file mode 100644 index 00000000..e57c16d4 --- /dev/null +++ b/kibble/setup/kibble.ini @@ -0,0 +1,16 @@ +[accounts] +allowSignup = True +verify = True + +[api] +database = 2 +version = 0.1.0 + +[elasticsearch] +dbname = kibble +conn_uri = elasticsearch:9200 +ss = False + +[mail] +mailhost = localhost:25 +sender = Kibble From 4d5f537b61960b7ae41796c4800ee3a9c55e55ae Mon Sep 17 00:00:00 2001 From: Midhun R Nair Date: Tue, 27 Oct 2020 23:06:25 +0530 Subject: [PATCH 14/48] Fix deprecation warning for yaml.load() (#82) --- kibble/api/handler.py | 2 +- kibble/api/pages/org/sourcetypes.py | 2 +- kibble/api/pages/sources.py | 2 +- kibble/api/pages/widgets.py | 2 +- kibble/api/plugins/openapi.py | 2 +- kibble/api/yaml/openapi/combine.py | 8 ++++---- kibble/setup/makeaccount.py | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/kibble/api/handler.py b/kibble/api/handler.py index 63d24891..798e6685 100644 --- a/kibble/api/handler.py +++ b/kibble/api/handler.py @@ -47,7 +47,7 @@ # Load Kibble master configuration with open(KIBBLE_YAML, "r") as f: - config = yaml.load(f) + config = yaml.safe_load(f) # Instantiate database connections DB = None diff --git a/kibble/api/pages/org/sourcetypes.py b/kibble/api/pages/org/sourcetypes.py index 73c45701..df4dab94 100644 --- a/kibble/api/pages/org/sourcetypes.py +++ b/kibble/api/pages/org/sourcetypes.py @@ -74,6 +74,6 @@ def run(API, environ, indata, session): with open(os.path.join(YAML_DIRECTORY, "sourcetypes.yaml")) as f: - types = yaml.load(f) + types = yaml.safe_load(f) yield json.dumps(types) diff --git a/kibble/api/pages/sources.py b/kibble/api/pages/sources.py index 0b737ee1..fc8923ca 100644 --- a/kibble/api/pages/sources.py +++ b/kibble/api/pages/sources.py @@ -227,7 +227,7 @@ def run(API, environ, indata, session): new = 0 old = 0 with open(os.path.join(YAML_DIRECTORY, "sourcetypes.yaml")) as f: - stypes = yaml.load(f) + stypes = yaml.safe_load(f) for source in indata.get("sources", []): sourceURL = source["sourceURL"] sourceType = source["type"] diff --git a/kibble/api/pages/widgets.py b/kibble/api/pages/widgets.py index 53f7ced8..35a7c963 100644 --- a/kibble/api/pages/widgets.py +++ b/kibble/api/pages/widgets.py @@ -60,7 +60,7 @@ def run(API, environ, indata, session): raise API.exception(403, "You must be logged in to use this API endpoint! 
%s") with open(os.path.join(YAML_DIRECTORY, "widgets.yaml")) as f: - widgets = yaml.load(f) + widgets = yaml.safe_load(f) page = indata["pageid"] if not page or page == "0": diff --git a/kibble/api/plugins/openapi.py b/kibble/api/plugins/openapi.py index 36242e1c..6265fc29 100644 --- a/kibble/api/plugins/openapi.py +++ b/kibble/api/plugins/openapi.py @@ -58,7 +58,7 @@ def __init__(self, APIFile): if APIFile.endswith(".json") or APIFile.endswith(".js"): self.API = json.load(open(APIFile)) else: - self.API = yaml.load(open(APIFile)) + self.API = yaml.safe_load(open(APIFile)) def validateType(self, field, value, ftype): """ Validate a single field value against an expected type """ diff --git a/kibble/api/yaml/openapi/combine.py b/kibble/api/yaml/openapi/combine.py index 1418f591..da618280 100644 --- a/kibble/api/yaml/openapi/combine.py +++ b/kibble/api/yaml/openapi/combine.py @@ -39,7 +39,7 @@ def deconstruct(): - yml = yaml.load(open(bpath + "/../openapi.yaml")) + yml = yaml.safe_load(open(bpath + "/../openapi.yaml")) noDefs = 0 print("Dumping paths into pages...") for endpoint, defs in yml["paths"].items(): @@ -114,7 +114,7 @@ def construct(): cyml = m.group(2) print("Weaving in API path %s" % apath) cyml = "\n".join([line[2:] for line in cyml.split("\n")]) - defs = yaml.load(cyml) + defs = yaml.safe_load(cyml) yml["paths"][apath] = defs else: fname = d @@ -129,7 +129,7 @@ def construct(): cyml = m.group(2) print("Weaving in API path %s" % apath) cyml = "\n".join([line[2:] for line in cyml.split("\n")]) - defs = yaml.load(cyml) + defs = yaml.safe_load(cyml) yml["paths"][apath] = defs apidir = os.path.abspath("%s/components" % bpath) print("Scanning %s" % apidir) @@ -142,7 +142,7 @@ def construct(): yml["components"][d] = yml["components"].get(d, {}) fpath = "%s/%s" % (cdir, fname) print("Scanning %s" % fpath) - defs = yaml.load(open(fpath)) + defs = yaml.safe_load(open(fpath)) yml["components"][d][fname.replace(".yaml", "")] = defs ypath = os.path.join(YAML_DIRECTORY, "openapi.yaml") with open(ypath, "w") as f: diff --git a/kibble/setup/makeaccount.py b/kibble/setup/makeaccount.py index 117e928b..50364859 100644 --- a/kibble/setup/makeaccount.py +++ b/kibble/setup/makeaccount.py @@ -73,7 +73,7 @@ def __init__(self, config): # Load Kibble master configuration with open(KIBBLE_YAML) as f: - config = yaml.load(f) + config = yaml.safe_load(f) DB = KibbleDatabase(config) From 5471c07a3798a58333d5cd3c8fac7dd789209538 Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Tue, 27 Oct 2020 18:38:27 +0100 Subject: [PATCH 15/48] Remove ulimits for elasticsearch in docker-compose (#80) --- .gitignore | 4 ++-- docker-compose-dev.yaml | 7 +------ 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index f7ef7928..b12fa18f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ # Apache Kibble files -api/yaml/kibble.yaml -kibble/api/yaml/kibble.yaml +api/yaml/kibble.yaml* +kibble/api/yaml/kibble.yaml* # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/docker-compose-dev.yaml b/docker-compose-dev.yaml index 5aaff836..55c81c6a 100644 --- a/docker-compose-dev.yaml +++ b/docker-compose-dev.yaml @@ -47,15 +47,10 @@ services: node.name: es01 discovery.seed_hosts: es02 cluster.initial_master_nodes: es01 - cluster.name: traefik-tutorial-cluster - bootstrap.memory_lock: "true" + cluster.name: kibble ES_JAVA_OPTS: -Xms256m -Xmx256m volumes: - "kibble-es-data:/usr/share/elasticsearch/data" - ulimits: - memlock: - soft: -1 - hard: -1 # Kibana to view and manage 
Elasticsearch kibana: From 6959f3c51a594941abc08face115cf3057aaed88 Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Mon, 2 Nov 2020 19:21:44 +0100 Subject: [PATCH 16/48] Use KibbleConfigParser in setup script (#83) * Use KibbleConfigParser in setup script * fixup! Use KibbleConfigParser in setup script --- docker-compose-dev.yaml | 2 +- kibble.ini | 26 +++++++++ kibble/configuration.py | 20 +++--- kibble/setup/kibble.ini | 16 ------ kibble/setup/setup.py | 111 +++++++----------------------- setup.py | 2 +- tests/__init__.py | 16 ++++++ tests/test_configuration.py | 43 ++++++++++++++ 8 files changed, 118 insertions(+), 118 deletions(-) create mode 100644 kibble.ini delete mode 100644 kibble/setup/kibble.ini create mode 100644 tests/__init__.py create mode 100644 tests/test_configuration.py diff --git a/docker-compose-dev.yaml b/docker-compose-dev.yaml index 55c81c6a..7744b998 100644 --- a/docker-compose-dev.yaml +++ b/docker-compose-dev.yaml @@ -7,7 +7,7 @@ services: build: context: . dockerfile: Dockerfile.dev - command: bash -c "python kibble/setup/setup.py -e elasticsearch -a -k" + command: bash -c "python kibble/setup/setup.py --autoadmin --skiponexist" volumes: - .:/kibble/ depends_on: diff --git a/kibble.ini b/kibble.ini new file mode 100644 index 00000000..0089f4d8 --- /dev/null +++ b/kibble.ini @@ -0,0 +1,26 @@ +[accounts] +allowSignup = True +verify = True + +[api] +# Kibble elasticsearch database revision +database = 2 +# Version of the API +version = 0.1.0 + +[elasticsearch] +# Elasticsearch database name +dbname = kibble +# Connection uri used to determine host and port of elasticsearch instance +conn_uri = elasticsearch:9200 +# Number of shards in es cluster +shards = 5 +# Number of replicas in es cluster +replicas = 1 +ssl = False +uri = +auth = + +[mail] +mailhost = localhost:25 +sender = Kibble diff --git a/kibble/configuration.py b/kibble/configuration.py index 9f8ca990..26619d0a 100644 --- a/kibble/configuration.py +++ b/kibble/configuration.py @@ -14,22 +14,20 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
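A minimal sketch of the consuming side this refactor enables (assuming the repo-root kibble.ini above is what the DEFAULT_KIBBLE_CONFIG_LOCATION defined in the hunk below resolves to; getint() and getboolean() are stock ConfigParser helpers, so the custom typed getters are no longer needed):

    from kibble.configuration import conf

    conn_uri = conf.get("elasticsearch", "conn_uri")     # "elasticsearch:9200"
    shards = conf.getint("elasticsearch", "shards")      # "5" -> 5
    signup = conf.getboolean("accounts", "allowSignup")  # "True" -> True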
- +import os from configparser import ConfigParser +DEFAULT_KIBBLE_CONFIG_LOCATION = os.path.join( + os.path.dirname(os.path.realpath(__file__)), os.pardir, "kibble.ini" +) + class KibbleConfigParser(ConfigParser): + """Custom Kibble config parser""" + def __init__(self): super().__init__() - def get_int(self, section: str, key: str) -> int: - try: - return int(self.get(section, key)) - except Exception: - raise TypeError("Unable to convert value to int") - def get_bool(self, section: str, key: str) -> bool: - try: - return bool(self.get(section, key)) - except Exception: - raise TypeError("Unable to convert value to bool") +conf = KibbleConfigParser() +conf.read(DEFAULT_KIBBLE_CONFIG_LOCATION) diff --git a/kibble/setup/kibble.ini b/kibble/setup/kibble.ini deleted file mode 100644 index e57c16d4..00000000 --- a/kibble/setup/kibble.ini +++ /dev/null @@ -1,16 +0,0 @@ -[accounts] -allowSignup = True -verify = True - -[api] -database = 2 -version = 0.1.0 - -[elasticsearch] -dbname = kibble -conn_uri = elasticsearch:9200 -ss = False - -[mail] -mailhost = localhost:25 -sender = Kibble diff --git a/kibble/setup/setup.py b/kibble/setup/setup.py index bded26d9..79d67a92 100644 --- a/kibble/setup/setup.py +++ b/kibble/setup/setup.py @@ -22,15 +22,15 @@ from getpass import getpass import tenacity -import yaml import bcrypt import json from elasticsearch import Elasticsearch -from kibble.settings import KIBBLE_YAML +from kibble.configuration import conf -KIBBLE_VERSION = "0.1.0" # ABI/API compat demarcation. -KIBBLE_DB_VERSION = 2 # Second database revision + +KIBBLE_VERSION = conf.get("api", "version") +KIBBLE_DB_VERSION = conf.get("api", "database") # database revision if sys.version_info <= (3, 3): print("This script requires Python 3.4 or higher") @@ -42,60 +42,53 @@ def get_parser(): arg_parser = argparse.ArgumentParser() arg_parser.add_argument( "-e", - "--hostname", - help="Pre-defined hostname for ElasticSearch (docker setups). Default: localhost", - default="localhost", - ) - arg_parser.add_argument( - "-p", - "--port", - help="Pre-defined port for ES (docker setups). Default: 9200", - default=9200, + "--conn-uri", + help="Pre-defined connection uri for ElasticSearch.", + default=conf.get("elasticsearch", "conn_uri"), ) arg_parser.add_argument( "-d", "--dbname", - help="Pre-defined Database prefix (docker setups). Default: kibble", - default="kibble", + help="Pre-defined Database prefix. Default: kibble", + default=conf.get("elasticsearch", "dbname"), ) arg_parser.add_argument( "-s", "--shards", - help="Predefined number of ES shards (docker setups), Default: 5", - default=5, + help="Predefined number of ES shards, Default: 5", + default=conf.get("elasticsearch", "shards"), ) arg_parser.add_argument( "-r", "--replicas", - help="Predefined number of replicas for ES (docker setups). Default: 1", - default=1, + help="Predefined number of replicas for ES. Default: 1", + default=conf.get("elasticsearch", "replicas"), ) arg_parser.add_argument( "-m", "--mailhost", - help="Pre-defined mail server host (docker setups). Default: localhost:25", - default="localhost:25", + help="Pre-defined mail server host. Default: localhost:25", + default=conf.get("mail", "mailhost"), ) arg_parser.add_argument( "-a", "--autoadmin", action="store_true", - help="Generate generic admin account (docker setups). Default: False", + help="Generate generic admin account. 
Default: False", default=False, ) arg_parser.add_argument( "-k", "--skiponexist", action="store_true", - help="Skip DB creation if DBs exist (docker setups). Defaul: True", + help="Skip DB creation if DBs exist. Defaul: True", default=True, ) return arg_parser def create_es_index( - hostname: str, - port: int, + conn_uri: str, dbname: str, shards: int, replicas: int, @@ -114,11 +107,7 @@ def create_es_index( with open(mappings_json, "r") as f: mappings = json.load(f) - es = Elasticsearch( - [{"host": hostname, "port": port, "use_ssl": False, "url_prefix": ""}], - max_retries=5, - retry_on_timeout=True, - ) + es = Elasticsearch([conn_uri], max_retries=5, retry_on_timeout=True) es_version = es.info()["version"]["number"] es6 = int(es_version.split(".")[0]) >= 6 @@ -223,51 +212,6 @@ def create_es_index( print("Account created!") -def get_kibble_yaml() -> str: - """Resolve path to kibble config yaml""" - kibble_yaml = KIBBLE_YAML - if os.path.exists(kibble_yaml): - print(f"{kibble_yaml} already exists! Writing to {kibble_yaml}.tmp instead") - kibble_yaml = kibble_yaml + ".tmp" - return kibble_yaml - - -def save_config(mlserver: str, hostname: str, port: int, dbname: str): - """Save kibble config to yaml file""" - if ":" in mlserver: - try: - mailhost, mailport = mlserver.split(":") - except ValueError: - raise ValueError( - "mailhost argument must be in form of `host:port` or `host`" - ) - else: - mailhost = mlserver - mailport = 25 - - config = { - "api": {"version": KIBBLE_VERSION, "database": KIBBLE_DB_VERSION}, - "elasticsearch": { - "host": hostname, - "port": port, - "ssl": False, - "dbname": dbname, - }, - "mail": { - "mailhost": mailhost, - "mailport": int(mailport), - "sender": "Kibble ", - }, - "accounts": {"allowSignup": True, "verify": True}, - } - - kibble_yaml = get_kibble_yaml() - print(f"Writing Kibble config to {kibble_yaml}") - with open(kibble_yaml, "w") as f: - f.write(yaml.dump(config, default_flow_style=False)) - f.close() - - def get_user_input(msg: str, secure: bool = False): value = None while not value: @@ -279,8 +223,7 @@ def print_configuration(args): print( "Configuring Apache Kibble elasticsearch instance with the following arguments:" ) - print(f"- hostname: {args.hostname}") - print(f"- port: {int(args.port)}") + print(f"- conn_uri: {args.conn_uri}") print(f"- dbname: {args.dbname}") print(f"- shards: {int(args.shards)}") print(f"- replicas: {int(args.replicas)}") @@ -311,7 +254,7 @@ def main(): # Create Elasticsearch index # Retry in case ES is not yet up - print(f"Elasticsearch: {args.hostname}:{args.port}") + print(f"Elasticsearch: {args.conn_uri}") for attempt in tenacity.Retrying( retry=tenacity.retry_if_exception_type(exception_types=Exception), wait=tenacity.wait_fixed(10), @@ -321,8 +264,7 @@ def main(): with attempt: print("Trying to create ES index...") create_es_index( - hostname=args.hostname, - port=int(args.port), + conn_uri=args.conn_uri, dbname=args.dbname, shards=int(args.shards), replicas=int(args.replicas), @@ -331,15 +273,6 @@ def main(): skiponexist=args.skiponexist, ) print() - - # Create Kibble configuration file - save_config( - mlserver=args.mailhost, - hostname=args.hostname, - port=int(args.port), - dbname=args.dbname, - ) - print() print("All done, Kibble should...work now :)") diff --git a/setup.py b/setup.py index abfe996f..6c3de19a 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,7 @@ spec.loader.exec_module(mod) # type: ignore version = mod.version # type: ignore -DEVEL_REQUIREMENTS = ["pre-commit==2.7.1", "black==20.8b1"] 
+DEVEL_REQUIREMENTS = ["black==20.8b1", "pre-commit==2.7.1", "pytest==6.1.1"] INSTALL_REQUIREMENTS = [ "bcrypt==3.2.0", diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..13a83393 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/tests/test_configuration.py b/tests/test_configuration.py new file mode 100644 index 00000000..32c13f1d --- /dev/null +++ b/tests/test_configuration.py @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import pytest + +from kibble.configuration import conf + + +class TestDefaultConfig: + @pytest.mark.parametrize( + "section, key, value", + [ + ("accounts", "allowSignup", True), + ("accounts", "verify", True), + ("api", "database", 2), + ("api", "version", "0.1.0"), + ("elasticsearch", "conn_uri", "elasticsearch:9200"), + ("mail", "mailhost", "localhost:25"), + ], + ) + def test_default_values(self, section, key, value): + if isinstance(value, bool): + config_value = conf.getboolean(section, key) + elif isinstance(value, int): + config_value = conf.getint(section, key) + else: + config_value = conf.get(section, key) + + assert config_value == value From 0a11b51dfa01521a10887556124035107ad1fd80 Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Tue, 10 Nov 2020 22:54:53 +0100 Subject: [PATCH 17/48] Add scanners to main repository (#78) --- kibble/scanners/README.md | 80 ++++ kibble/scanners/__init__.py | 16 + kibble/scanners/brokers/__init__.py | 16 + kibble/scanners/brokers/kibbleES.py | 367 +++++++++++++++++ kibble/scanners/config.yaml | 39 ++ kibble/scanners/kibble-scanner.py | 197 +++++++++ kibble/scanners/mapping.json | 455 +++++++++++++++++++++ kibble/scanners/scanners/__init__.py | 61 +++ kibble/scanners/scanners/bugzilla.py | 465 ++++++++++++++++++++++ kibble/scanners/scanners/buildbot.py | 281 +++++++++++++ kibble/scanners/scanners/discourse.py | 345 ++++++++++++++++ kibble/scanners/scanners/gerrit.py | 258 ++++++++++++ kibble/scanners/scanners/git-census.py | 328 +++++++++++++++ kibble/scanners/scanners/git-evolution.py | 259 ++++++++++++ kibble/scanners/scanners/git-sloc.py | 87 ++++ kibble/scanners/scanners/git-sync.py | 174 ++++++++ kibble/scanners/scanners/github-issues.py | 245 ++++++++++++ kibble/scanners/scanners/github-stats.py | 139 +++++++ kibble/scanners/scanners/jenkins.py | 356 +++++++++++++++++ kibble/scanners/scanners/jira.py | 463 +++++++++++++++++++++ kibble/scanners/scanners/pipermail.py | 295 ++++++++++++++ kibble/scanners/scanners/ponymail-kpe.py | 134 +++++++ kibble/scanners/scanners/ponymail-tone.py | 137 +++++++ kibble/scanners/scanners/ponymail.py | 309 ++++++++++++++ kibble/scanners/scanners/travis.py | 393 ++++++++++++++++++ kibble/scanners/scanners/twitter.py | 149 +++++++ kibble/scanners/utils/__init__.py | 16 + kibble/scanners/utils/git.py | 93 +++++ kibble/scanners/utils/github.py | 97 +++++ kibble/scanners/utils/jsonapi.py | 108 +++++ kibble/scanners/utils/kpe.py | 169 ++++++++ kibble/scanners/utils/sloc.py | 78 ++++ kibble/scanners/utils/tone.py | 197 +++++++++ kibble/scanners/utils/urlmisc.py | 79 ++++ setup.py | 3 + 35 files changed, 6888 insertions(+) create mode 100644 kibble/scanners/README.md create mode 100644 kibble/scanners/__init__.py create mode 100644 kibble/scanners/brokers/__init__.py create mode 100644 kibble/scanners/brokers/kibbleES.py create mode 100644 kibble/scanners/config.yaml create mode 100644 kibble/scanners/kibble-scanner.py create mode 100644 kibble/scanners/mapping.json create mode 100644 kibble/scanners/scanners/__init__.py create mode 100644 kibble/scanners/scanners/bugzilla.py create mode 100644 kibble/scanners/scanners/buildbot.py create mode 100644 kibble/scanners/scanners/discourse.py create mode 100644 kibble/scanners/scanners/gerrit.py create mode 100644 kibble/scanners/scanners/git-census.py create mode 100644 kibble/scanners/scanners/git-evolution.py create mode 100644 kibble/scanners/scanners/git-sloc.py create mode 100644 kibble/scanners/scanners/git-sync.py create mode 100644 
kibble/scanners/scanners/github-issues.py create mode 100644 kibble/scanners/scanners/github-stats.py create mode 100644 kibble/scanners/scanners/jenkins.py create mode 100644 kibble/scanners/scanners/jira.py create mode 100644 kibble/scanners/scanners/pipermail.py create mode 100644 kibble/scanners/scanners/ponymail-kpe.py create mode 100644 kibble/scanners/scanners/ponymail-tone.py create mode 100644 kibble/scanners/scanners/ponymail.py create mode 100644 kibble/scanners/scanners/travis.py create mode 100644 kibble/scanners/scanners/twitter.py create mode 100644 kibble/scanners/utils/__init__.py create mode 100644 kibble/scanners/utils/git.py create mode 100644 kibble/scanners/utils/github.py create mode 100644 kibble/scanners/utils/jsonapi.py create mode 100644 kibble/scanners/utils/kpe.py create mode 100644 kibble/scanners/utils/sloc.py create mode 100644 kibble/scanners/utils/tone.py create mode 100644 kibble/scanners/utils/urlmisc.py diff --git a/kibble/scanners/README.md b/kibble/scanners/README.md new file mode 100644 index 00000000..d32c228b --- /dev/null +++ b/kibble/scanners/README.md @@ -0,0 +1,80 @@ +# Kibble Scanner Application +The Kibble Scanners collect information for the Kibble Suite. + +## Setup instructions: + + - Edit conf/config.yaml to match your Kibble service + +## How to run: + + - On a daily/weekly/whatever basis, run: `python3 src/kibble-scanner.py`. + +### Command line options: + + usage: kibble-scanner.py [-h] [-o ORG] [-f CONFIG] [-a AGE] [-s SOURCE] + [-n NODES] [-t TYPE] [-e EXCLUDE [EXCLUDE ...]] + [-v VIEW] + + optional arguments: + -h, --help show this help message and exit + -o ORG, --org ORG The organisation to gather stats for. If left out, all + organisations will be scanned. + -f CONFIG, --config CONFIG + Location of the yaml config file (full path) + -a AGE, --age AGE Minimum age in hours before performing a new scan on + an already processed source. --age 12 will not process + any source that was processed less than 12 hours ago, + but will process new sources. + -s SOURCE, --source SOURCE + A specific source (wildcard) to run scans on. + -n NODES, --nodes NODES + Number of nodes in the cluster (used for load + balancing) + -t TYPE, --type TYPE Specific type of scanner to run (default is run all + scanners) + -e EXCLUDE [EXCLUDE ...], --exclude EXCLUDE [EXCLUDE ...] 
+ Specific type of scanner(s) to exclude + -v VIEW, --view VIEW Specific source view to scan (default is scan all + sources) + + +## Directory structure: + + - `conf/`: Config files + - `src/`: + - - `kibble-scanner.py`: Main script for launching scans + - - `plugins/`: + - - - `brokers`: The various database brokers (ES or JSON API) + - - - `utils`: Utility libraries + - - - `scanners`: The individual scanner applications + +## Currently available scanner plugins: + + - Apache Pony Mail (`plugins/scanners/ponymail.py`) + - Atlassian JIRA (`plugins/scanners/jira.py`) + - BugZilla Issue Tracker (`plugins/scanners/bugzilla.py`) + - BuildBot (`plugins/scanners/buildbot.py`) + - Discourse (`plugins/scanners/discourse.py`) + - Gerrit Code Review (`plugins/scanners/gerrit.py`) + - Git Repository Fetcher (`plugins/scanners/git-sync.py`) + - Git Census Counter (`plugins/scanners/git-census.py`) + - Git Code Evolution Counter (`plugins/scanners/git-evolution.py`) + - Git SLoC Counter (`plugins/scanners/git-sloc.py`) + - GitHub Issues/PRs (`plugins/scanners/github.py`) + - GitHub Traffic Statistics (`plugins/scanners/github-stats.py`) + - GNU Mailman Pipermail (`plugins/scanners/pipermail.py`) + - Jenkins (`plugins/scanners/jenkins.py`) + - Travis CI (`plugins/scanners/travis.py`) + +## Requirements: + + - [cloc](https://github.com/AlDanial/cloc) version 1.76 or later `(optional)` + - git binaries + - python3 (3.3 or later) + - python3-elasticsearch + - python3-certifi + - python3-yaml + + +# Get involved + TBD. Please see https://kibble.apache.org/ for details! diff --git a/kibble/scanners/__init__.py b/kibble/scanners/__init__.py new file mode 100644 index 00000000..13a83393 --- /dev/null +++ b/kibble/scanners/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/kibble/scanners/brokers/__init__.py b/kibble/scanners/brokers/__init__.py new file mode 100644 index 00000000..13a83393 --- /dev/null +++ b/kibble/scanners/brokers/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/kibble/scanners/brokers/kibbleES.py b/kibble/scanners/brokers/kibbleES.py new file mode 100644 index 00000000..2e1d7c42 --- /dev/null +++ b/kibble/scanners/brokers/kibbleES.py @@ -0,0 +1,367 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import elasticsearch +import elasticsearch.helpers +import sys + +KIBBLE_DB_VERSION = 2 # Current DB struct version +ACCEPTED_DB_VERSIONS = [1, 2] # Versions we know how to work with. + + +class KibbleESWrapper: + """ + Class for rewriting old-style queries to the new ones, + where doc_type is an integral part of the DB name + """ + + def __init__(self, ES): + self.ES = ES + self.indices = self.indicesClass(ES) + + def get(self, index, doc_type, id): + return self.ES.get(index=index + "_" + doc_type, doc_type="_doc", id=id) + + def exists(self, index, doc_type, id): + return self.ES.exists(index=index + "_" + doc_type, doc_type="_doc", id=id) + + def delete(self, index, doc_type, id): + return self.ES.delete(index=index + "_" + doc_type, doc_type="_doc", id=id) + + def index(self, index, doc_type, id, body): + return self.ES.index( + index=index + "_" + doc_type, doc_type="_doc", id=id, body=body + ) + + def update(self, index, doc_type, id, body): + return self.ES.update( + index=index + "_" + doc_type, doc_type="_doc", id=id, body=body + ) + + def search(self, index, doc_type, size=100, body=None): + return self.ES.search( + index=index + "_" + doc_type, doc_type="_doc", size=size, body=body + ) + + def count(self, index, doc_type, body=None): + return self.ES.count(index=index + "_" + doc_type, doc_type="_doc", body=body) + + class indicesClass: + """ Indices helper class """ + + def __init__(self, ES): + self.ES = ES + + def exists(self, index): + return self.ES.indices.exists(index=index) + + +class KibbleESWrapperSeven: + """ + Class for rewriting old-style queries to the new ones, + where doc_type is an integral part of the DB name and NOT USED (>= 7.x) + """ + + def __init__(self, ES): + self.ES = ES + self.indices = self.indicesClass(ES) + + def get(self, index, doc_type, id): + return self.ES.get(index=index + "_" + doc_type, id=id) + + def exists(self, index, doc_type, id): + return self.ES.exists(index=index + "_" + doc_type, id=id) + + def delete(self, index, doc_type, id): + return self.ES.delete(index=index + "_" + doc_type, id=id) + + def index(self, index, doc_type, id, body): + return self.ES.index(index=index + "_" + doc_type, id=id, body=body) + + def update(self, index, doc_type, id, body): + return self.ES.update(index=index + "_" + doc_type, id=id, body=body) + + def search(self, index, doc_type, size=100, body=None): + return self.ES.search(index=index + "_" + doc_type, 
size=size, body=body) + + def count(self, index, doc_type, body=None): + return self.ES.count(index=index + "_" + doc_type, body=body) + + class indicesClass: + """ Indices helper class """ + + def __init__(self, ES): + self.ES = ES + + def exists(self, index): + return self.ES.indices.exists(index=index) + + +# This is redundant, refactor later? +def pprint(string, err=False): + line = "[core]: %s" % (string) + if err: + sys.stderr.write(line + "\n") + else: + print(line) + + +class KibbleBit: + """ KibbleBit class with direct ElasticSearch access """ + + def __init__(self, broker, organisation, tid): + self.config = broker.config + self.organisation = organisation + self.broker = broker + self.json_queue = [] + self.queueMax = 1000 # Entries to keep before bulk pushing + self.pluginname = "" + self.tid = tid + self.dbname = self.broker.config["elasticsearch"]["database"] + + def __del__(self): + """ On unload/delete, push the last chunks of data to ES """ + if self.json_queue: + print("Pushing stragglers") + self.bulk() + + def pprint(self, string, err=False): + line = "[thread#%i:%s]: %s" % (self.tid, self.pluginname, string) + if err: + sys.stderr.write(line + "\n") + else: + print(line) + + def updateSource(self, source): + """ Updates a source document, usually with a status update """ + self.broker.DB.index( + index=self.broker.config["elasticsearch"]["database"], + doc_type="source", + id=source["sourceID"], + body=source, + ) + + def get(self, doctype, docid): + """ Fetches a document from the DB """ + doc = self.broker.DB.get( + index=self.broker.config["elasticsearch"]["database"], + doc_type=doctype, + id=docid, + ) + if doc: + return doc["_source"] + return None + + def exists(self, doctype, docid): + """ Checks whether a document already exists or not """ + return self.broker.DB.exists( + index=self.broker.config["elasticsearch"]["database"], + doc_type=doctype, + id=docid, + ) + + def index(self, doctype, docid, document): + """ Adds a new document to the index """ + dbname = self.broker.config["elasticsearch"]["database"] + self.broker.DB.index(index=dbname, doc_type=doctype, id=docid, body=document) + + def append(self, t, doc): + """ Append a document to the bulk push queue """ + if not "id" in doc: + sys.stderr.write("No doc ID specified!\n") + return + doc["doctype"] = t + self.json_queue.append(doc) + # If we've crossed the bulk limit, do a push + if len(self.json_queue) > self.queueMax: + pprint("Bulk push forced") + self.bulk() + + def bulk(self): + """ Push pending JSON objects in the queue to ES""" + xjson = self.json_queue + js_arr = [] + self.json_queue = [] + for entry in xjson: + js = entry + doc = js + js["@version"] = 1 + dbname = self.broker.config["elasticsearch"]["database"] + if self.broker.noTypes: + dbname += "_%s" % js["doctype"] + js_arr.append( + { + "_op_type": "update" if js.get("upsert") else "index", + "_index": dbname, + "_type": "_doc", + "_id": js["id"], + "doc" if js.get("upsert") else "_source": doc, + "doc_as_upsert": True, + } + ) + else: + js_arr.append( + { + "_op_type": "update" if js.get("upsert") else "index", + "_index": dbname, + "_type": js["doctype"], + "_id": js["id"], + "doc" if js.get("upsert") else "_source": doc, + "doc_as_upsert": True, + } + ) + try: + elasticsearch.helpers.bulk(self.broker.oDB, js_arr) + except Exception as err: + pprint("Warning: Could not bulk insert: %s" % err) + + +class KibbleOrganisation: + """ KibbleOrg with direct ElasticSearch access """ + + def __init__(self, broker, org): + """ Init an org, set 
up ElasticSearch for KibbleBits later on """ + + self.broker = broker + self.id = org + + def sources(self, sourceType=None, view=None): + """ Get all sources or sources of a specific type for an org """ + s = [] + # Search for all sources of this organisation + mustArray = [{"term": {"organisation": self.id}}] + if view: + res = self.broker.DB.get( + index=self.broker.config["elasticsearch"]["database"], + doc_type="view", + id=view, + ) + if res: + mustArray.append({"terms": {"sourceID": res["_source"]["sourceList"]}}) + # If we want a specific source type, amend the search criteria + if sourceType: + mustArray.append({"term": {"type": sourceType}}) + # Run the search, fetch all results, 9999 max. TODO: Scroll??? + res = self.broker.DB.search( + index=self.broker.config["elasticsearch"]["database"], + doc_type="source", + size=9999, + body={"query": {"bool": {"must": mustArray}}, "sort": {"sourceURL": "asc"}}, + ) + + for hit in res["hits"]["hits"]: + if sourceType == None or hit["_source"]["type"] == sourceType: + s.append(hit["_source"]) + return s + + +""" Master Kibble Broker Class for direct ElasticSearch access """ + + +class Broker: + def __init__(self, config): + es_config = config["elasticsearch"] + auth = None + if "user" in es_config: + auth = (es_config["user"], es_config["password"]) + pprint( + "Connecting to ElasticSearch database at %s:%i..." + % (es_config["hostname"], es_config.get("port", 9200)) + ) + es = elasticsearch.Elasticsearch( + [ + { + "host": es_config["hostname"], + "port": int(es_config.get("port", 9200)), + "use_ssl": es_config.get("ssl", False), + "verify_certs": False, + "url_prefix": es_config.get("uri", ""), + "http_auth": auth, + } + ], + max_retries=5, + retry_on_timeout=True, + ) + es_info = es.info() + pprint("Connected!") + self.DB = es + self.oDB = es # Original ES class, always. the .DB may change + self.config = config + self.bitClass = KibbleBit + # This bit is required since ES 6.x and above don't like document types + self.noTypes = ( + True if int(es_info["version"]["number"].split(".")[0]) >= 6 else False + ) + self.seven = ( + True if int(es_info["version"]["number"].split(".")[0]) >= 7 else False + ) + if self.noTypes: + pprint("This is a type-less DB, expanding database names instead.") + if self.seven: + pprint("We're using ES >= 7.x, NO DOC_TYPE!") + es = KibbleESWrapperSeven(es) + else: + es = KibbleESWrapper(es) + self.DB = es + if not es.indices.exists(index=es_config["database"] + "_api"): + sys.stderr.write( + "Could not find database group %s_* in ElasticSearch!\n" + % es_config["database"] + ) + sys.exit(-1) + else: + pprint("This DB supports types, utilizing..") + if not es.indices.exists(index=es_config["database"]): + sys.stderr.write( + "Could not find database %s in ElasticSearch!\n" + % es_config["database"] + ) + sys.exit(-1) + apidoc = es.get(index=es_config["database"], doc_type="api", id="current")[ + "_source" + ] + # We currently accept and know how to use DB versions 1 and 2. + if apidoc["dbversion"] not in ACCEPTED_DB_VERSIONS: + if apidoc["dbversion"] > KIBBLE_DB_VERSION: + sys.stderr.write( + "The database '%s' uses a newer structure format (version %u) than the scanners (version %u). Please upgrade your scanners.\n" + % (es_config["database"], apidoc["dbversion"], KIBBLE_DB_VERSION) + ) + sys.exit(-1) + if apidoc["dbversion"] < KIBBLE_DB_VERSION: + sys.stderr.write( + "The database '%s' uses an older structure format (version %u) than the scanners (version %u). 
Please upgrade your main Kibble server.\n" + % (es_config["database"], apidoc["dbversion"], KIBBLE_DB_VERSION) + ) + sys.exit(-1) + + def organisations(self): + """ Return a list of all organisations """ + orgs = [] + + # Run the search, fetch all orgs, 9999 max. TODO: Scroll??? + res = self.DB.search( + index=self.config["elasticsearch"]["database"], + doc_type="organisation", + size=9999, + body={"query": {"match_all": {}}}, + ) + + for hit in res["hits"]["hits"]: + org = hit["_source"]["id"] + orgClass = KibbleOrganisation(self, org) + yield orgClass diff --git a/kibble/scanners/config.yaml b/kibble/scanners/config.yaml new file mode 100644 index 00000000..d835539a --- /dev/null +++ b/kibble/scanners/config.yaml @@ -0,0 +1,39 @@ +# If enabled, kibble scanners will use direct ES connection. +elasticsearch: + enabled: true + hostname: localhost + port: 9200 + ssl: false + uri: "" + database: kibble + +# If enabled, kibble scanners will use the HTTP JSON API +broker: + enabled: false + url: https://localhost/api/ + auth: + username: kibble + password: kibble4life + +# Scanner client options +scanner: + # scratchdir: Location for storing file objects like git repos etc + # This should be permanent to speed up scans of large repositories + # on consecutive scans, but may be ephemeral like /tmp + scratchdir: /tmp + # If you are load balancing the scans, you should specify + # how many nodes are working, and which one you are, + # using the format: $nodeNo/$totalNodes. If there are 4 nodes, + # each node will get 1/4th of all jobs to work on. + #balance: 1/4 + +# Watson/BlueMix configuration for sentiment analysis, if applicable +#watson: +# username: uuid-here +# password: pass-here +# api: https://gateway-location.watsonplatform.net/tone-analyzer/api + +# Azure Text Analysis API configuration, if applicable +#azure: +# apikey: key-here +# location: west-us diff --git a/kibble/scanners/kibble-scanner.py b/kibble/scanners/kibble-scanner.py new file mode 100644 index 00000000..953505d6 --- /dev/null +++ b/kibble/scanners/kibble-scanner.py @@ -0,0 +1,197 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +import threading +import multiprocessing +from pprint import pprint + +import yaml +import time +import argparse +from kibble.scanners import scanners +from kibble.scanners.brokers import kibbleES + +VERSION = "0.1.0" CONFIG_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.yaml") PENDING_OBJECTS = [] BIG_LOCK = threading.Lock() + + +def base_parser(): + arg_parser = argparse.ArgumentParser() + arg_parser.add_argument( + "-o", + "--org", + help="The organisation to gather stats for.
If left out, all organisations will be scanned.", + ) + arg_parser.add_argument( + "-f", "--config", help="Location of the yaml config file (full path)" + ) + arg_parser.add_argument( + "-a", + "--age", + help="Minimum age in hours before performing a new scan on an already processed source. --age 12 will not process any source that was processed less than 12 hours ago, but will process new sources.", + ) + arg_parser.add_argument( + "-s", "--source", help="A specific source (wildcard) to run scans on." + ) + arg_parser.add_argument( + "-n", "--nodes", help="Number of nodes in the cluster (used for load balancing)" + ) + arg_parser.add_argument( + "-t", + "--type", + help="Specific type of scanner to run (default is run all scanners)", + ) + arg_parser.add_argument( + "-e", "--exclude", nargs="+", help="Specific type of scanner(s) to exclude" + ) + arg_parser.add_argument( + "-v", + "--view", + help="Specific source view to scan (default is scan all sources)", + ) + return arg_parser + + +def isMine(ID, config): + if config["scanner"].get("balance", None): + a = config["scanner"]["balance"].split("/") + nodeNo = int(a[0]) + numNodes = int(a[1]) + if numNodes == 0: + return True + bignum = int(ID, 16) % numNodes + if bignum == int(nodeNo) - 1: + return True + return False + return True + + +class scanThread(threading.Thread): + """ A thread object that grabs an item from the queue and processes + it, using whatever plugins will come out to play. """ + + def __init__(self, broker, org, i, t=None, e=None): + super(scanThread, self).__init__() + self.broker = broker + self.org = org + self.id = i + self.bit = self.broker.bitClass(self.broker, self.org, i) + self.stype = t + self.exclude = e + pprint("Initialized thread %i" % i) + + def run(self): + global BIG_LOCK, PENDING_OBJECTS + time.sleep(0.5) # Primarily to align printouts. + # While there are objects to snag + a = 0 + while PENDING_OBJECTS: + BIG_LOCK.acquire(blocking=True) + try: + # Try grabbing an object (might not be any left!) + obj = PENDING_OBJECTS.pop(0) + except: + pass + BIG_LOCK.release() + if obj: + # If load balancing jobs, make sure this one is ours + if isMine(obj["sourceID"], self.broker.config): + # Run through list of scanners in order, apply when useful + for sid, scanner in scanners.enumerate(): + + if scanner.accepts(obj): + self.bit.pluginname = "plugins/scanners/" + sid + # Excluded scanner type? + if self.exclude and sid in self.exclude: + continue + # Specific scanner type or no types mentioned? + if not self.stype or self.stype == sid: + scanner.scan(self.bit, obj) + else: + break + self.bit.pluginname = "core" + self.bit.pprint("No more objects, exiting!") + + +def main(): + pprint("Kibble Scanner v/%s starting" % VERSION) + global CONFIG_FILE, PENDING_OBJECTS + args = base_parser().parse_args() + + # Load config yaml + if args.config: + CONFIG_FILE = args.config + config = yaml.load(open(CONFIG_FILE)) + pprint("Loaded YAML config from %s" % CONFIG_FILE) + + pprint("Using direct ElasticSearch broker model") + broker = kibbleES.Broker(config) + + orgNo = 0 + sourceNo = 0 + for org in broker.organisations(): + if not args.org or args.org == org.id: + pprint("Processing organisation %s" % org.id) + orgNo += 1 + + # Compile source list + # If --age is passed, only append source that either + # have never been scanned, or have been scanned more than + # N hours ago by any scanner. 
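+        # Worked example of the cutoff applied just below: with --age 12,
+        # minAge = time.time() - 12 * 3600; a source whose newest
+        # step["time"] is only 3 hours old satisfies step["time"] >= minAge,
+        # so it is flagged tooNew and skipped for this run.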
+ if args.age: + minAge = time.time() - int(args.age) * 3600 + for source in org.sources(view=args.view): + tooNew = False + if "steps" in source: + for key, step in source["steps"].items(): + if "time" in step and step["time"] >= minAge: + tooNew = True + break + if not tooNew: + if not args.source or (args.source == source["sourceID"]): + PENDING_OBJECTS.append(source) + else: + PENDING_OBJECTS = [] + for source in org.sources(view=args.view): + if not args.source or (args.source == source["sourceID"]): + PENDING_OBJECTS.append(source) + sourceNo += len(PENDING_OBJECTS) + + # Start up some threads equal to number of cores on the box, + # but no more than 4. We don't want an IOWait nightmare. + threads = [] + core_count = min((4, int(multiprocessing.cpu_count()))) + for i in range(0, core_count): + sThread = scanThread(broker, org, i + 1, args.type, args.exclude) + sThread.start() + threads.append(sThread) + + # Wait for them all to finish. + for t in threads: + t.join() + + pprint( + "All done scanning for now, found %i organisations and %i sources to process." + % (orgNo, sourceNo) + ) + + +if __name__ == "__main__": + main() diff --git a/kibble/scanners/mapping.json b/kibble/scanners/mapping.json new file mode 100644 index 00000000..04ca3f59 --- /dev/null +++ b/kibble/scanners/mapping.json @@ -0,0 +1,455 @@ +{ + "mappings": { + "email": { + "properties": { + "@version": { + "type": "long" + }, + "address": { + "type": "string", + "index": "not_analyzed" + }, + "date": { + "type": "date", + "store": true, + "format": "yyyy/MM/dd HH:mm:ss" + }, + "hash": { + "type": "string", + "index": "not_analyzed" + }, + "id": { + "type": "string", + "index": "not_analyzed" + }, + "organisation": { + "type": "string", + "index": "not_analyzed" + }, + "sender": { + "type": "string", + "index": "not_analyzed" + }, + "sourceID": { + "type": "string", + "index": "not_analyzed" + }, + "ts": { + "type": "long" + } + } + }, + "account": { + "properties": { + "cookie": { + "type": "string", + "index": "not_analyzed" + }, + "email": { + "type": "string", + "index": "not_analyzed" + }, + "fullname": { + "type": "string" + }, + "id": { + "type": "string", + "index": "not_analyzed" + }, + "organisation": { + "type": "string", + "index": "not_analyzed" + }, + "orgs": { + "type": "string" + }, + "password": { + "type": "string", + "index": "not_analyzed" + }, + "request_id": { + "type": "string", + "index": "not_analyzed" + }, + "screenname": { + "type": "string", + "index": "not_analyzed" + }, + "tag": { + "type": "string", + "index": "not_analyzed" + }, + "verified": { + "type": "boolean" + } + } + }, + "code_commit": { + "properties": { + "@version": { + "type": "long" + }, + "author_email": { + "type": "string", + "index": "not_analyzed" + }, + "author_name": { + "type": "string", + "index": "not_analyzed" + }, + "committer_email": { + "type": "string", + "index": "not_analyzed" + }, + "committer_name": { + "type": "string", + "index": "not_analyzed" + }, + "date": { + "type": "date", + "store": true, + "format": "yyyy/MM/dd HH:mm:ss" + }, + "deletions": { + "type": "long" + }, + "id": { + "type": "string", + "index": "not_analyzed" + }, + "insertions": { + "type": "long" + }, + "organisation": { + "type": "string", + "index": "not_analyzed" + }, + "sourceID": { + "type": "string", + "index": "not_analyzed" + }, + "source": { + "type": "string", + "index": "not_analyzed" + }, + "ts": { + "type": "long" + }, + "tsday": { + "type": "long" + }, + "vcs": { + "type": "string", + "index": "not_analyzed" + } + } + }, 
+ "code_commit_unique": { + "properties": { + "@version": { + "type": "long" + }, + "author_email": { + "type": "string", + "index": "not_analyzed" + }, + "author_name": { + "type": "string", + "index": "not_analyzed" + }, + "committer_email": { + "type": "string", + "index": "not_analyzed" + }, + "committer_name": { + "type": "string", + "index": "not_analyzed" + }, + "date": { + "type": "date", + "store": true, + "format": "yyyy/MM/dd HH:mm:ss" + }, + "deletions": { + "type": "long" + }, + "id": { + "type": "string", + "index": "not_analyzed" + }, + "insertions": { + "type": "long" + }, + "organisation": { + "type": "string", + "index": "not_analyzed" + }, + "sourceID": { + "type": "string", + "index": "not_analyzed" + }, + "source": { + "type": "string", + "index": "not_analyzed" + }, + "ts": { + "type": "long" + }, + "tsday": { + "type": "long" + }, + "vcs": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "org": { + "properties": { + "admins": { + "type": "string" + }, + "id": { + "type": "string", + "index": "not_analyzed" + }, + "name": { + "type": "string", + "index": "not_analyzed" + }, + "request_id": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "mailstats": { + "properties": { + "authors": { + "type": "long" + }, + "date": { + "type": "date", + "store": true, + "format": "yyyy/MM/dd HH:mm:ss" + }, + "emails": { + "type": "long" + }, + "hash": { + "type": "string", + "index": "not_analyzed" + }, + "organisation": { + "type": "string", + "index": "not_analyzed" + }, + "sourceID": { + "type": "string", + "index": "not_analyzed" + }, + "topics": { + "type": "long" + } + } + }, + "mailtop": { + "properties": { + "date": { + "type": "date", + "store": true, + "format": "yyyy/MM/dd HH:mm:ss" + }, + "emails": { + "type": "long" + }, + "hash": { + "type": "string", + "index": "not_analyzed" + }, + "id": { + "type": "string", + "index": "not_analyzed" + }, + "organisation": { + "type": "string", + "index": "not_analyzed" + }, + "shash": { + "type": "string", + "index": "not_analyzed" + }, + "sourceID": { + "type": "string", + "index": "not_analyzed" + }, + "subject": { + "type": "string" + }, + "ts": { + "type": "long" + } + } + }, + "source": { + "properties": { + "default_branch": { + "type": "string" + }, + "exception": { + "type": "string" + }, + "good": { + "type": "boolean" + }, + "sourceID": { + "type": "string", + "index": "not_analyzed" + }, + "sourceURL": { + "type": "string", + "index": "not_analyzed" + }, + "organisation": { + "type": "string", + "index": "not_analyzed" + }, + "sync": { + "type": "double" + }, + "tag": { + "type": "string", + "index": "not_analyzed" + }, + "type": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "person": { + "properties": { + "@version": { + "type": "long" + }, + "address": { + "type": "string", + "index": "not_analyzed" + }, + "email": { + "type": "string", + "index": "not_analyzed" + }, + "id": { + "type": "string", + "index": "not_analyzed" + }, + "name": { + "type": "string", + "index": "not_analyzed" + }, + "organisation": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "issue": { + "properties": { + "assignee": { + "type": "string", + "index": "not_analyzed" + }, + "changeDate": { + "type": "date", + "store": true, + "format": "yyyy/MM/dd HH:mm:ss" + }, + "closed": { + "type": "double" + }, + "closedDate": { + "type": "date", + "format": "yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis" + }, + "comments": { + "type": "long" + }, + "created": { + "type": "double" + }, + 
"createdDate": { + "type": "date", + "format": "yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis" + }, + "id": { + "type": "string", + "index": "not_analyzed" + }, + "issueCloser": { + "type": "string", + "index": "not_analyzed" + }, + "issueCreator": { + "type": "string", + "index": "not_analyzed" + }, + "key": { + "type": "string", + "index": "not_analyzed" + }, + "organisation": { + "type": "string", + "index": "not_analyzed" + }, + "sourceID": { + "type": "string", + "index": "not_analyzed" + }, + "status": { + "type": "string", + "index": "not_analyzed" + }, + "title": { + "type": "string", + "index": "not_analyzed" + }, + "url": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "evolution": { + "properties": { + "blank": { + "type": "long" + }, + "comments": { + "type": "long" + }, + "cost": { + "type": "double" + }, + "loc": { + "type": "long" + }, + "organisation": { + "type": "string", + "index": "not_analyzed" + }, + "sourceID": { + "type": "string", + "index": "not_analyzed" + }, + "time": { + "type": "double" + }, + "years": { + "type": "double" + } + } + } + } + +} diff --git a/kibble/scanners/scanners/__init__.py b/kibble/scanners/scanners/__init__.py new file mode 100644 index 00000000..7b51436b --- /dev/null +++ b/kibble/scanners/scanners/__init__.py @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +This file contains, in execution order, a list of the available +scanners that Kibble has. +""" + +import importlib + +# Define, in order of priority, all scanner plugins we have +__all__ = [ + "git-sync", # This needs to precede other VCS scanners! + "git-census", + "git-sloc", + "git-evolution", + "jira", + "ponymail", + "ponymail-tone", + "ponymail-kpe", + "pipermail", + "github-issues", + "bugzilla", + "gerrit", + "jenkins", + "buildbot", + "travis", + "discourse", +] + +# Import each plugin into a hash called 'scanners' +scanners = {} + +for p in __all__: + scanner = importlib.import_module("kibble.scanners.scanners.%s" % p) + scanners[p] = scanner + # This should ideally be pprint, meh + print( + "[core]: Loaded plugins/scanners/%s v/%s (%s)" + % (p, scanner.version, scanner.title) + ) + + +def enumerate(): + """ Returns the scanners as a dictionary, sorted by run-order """ + for p in __all__: + yield (p, scanners[p]) diff --git a/kibble/scanners/scanners/bugzilla.py b/kibble/scanners/scanners/bugzilla.py new file mode 100644 index 00000000..e45a69e5 --- /dev/null +++ b/kibble/scanners/scanners/bugzilla.py @@ -0,0 +1,465 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" This is the BugZilla scanner plugin for Kibble """ + +import re +import json +import time +import hashlib +from threading import Thread, Lock +from kibble.scanners.utils import jsonapi +import urllib + +title = "Scanner for BugZilla" +version = "0.1.0" + + +def accepts(source): + """ Determine if this is a BugZilla source """ + if source["type"] == "bugzilla": + return True + if source["type"] == "issuetracker": + bz = re.match( + r"(https?://\S+?)(/jsonrpc\.cgi)?[\s:?]+(.+)", source["sourceURL"] + ) + if bz: + return True + return False + + +def getTime(string): + return time.mktime( + time.strptime(re.sub(r"[zZ]", "", str(string)), "%Y-%m-%dT%H:%M:%S") + ) + + +def assigned(js): + if "items" in js: + for item in js["items"]: + if item["field"] == "assignee": + return True + return False + + +def wfi(js): + if "items" in js: + for item in js["items"]: + if item["field"] == "status" and item["toString"] == "Waiting for Infra": + return True + return False + + +def wfu(js): + if "items" in js: + for item in js["items"]: + if item["field"] == "status" and item["toString"] == "Waiting for user": + return True + return False + + +def moved(js): + if "items" in js: + for item in js["items"]: + if item["field"] == "Key" and item["toString"].find("INFRA-") != -1: + return True + return False + + +def wasclosed(js): + if "changelog" in js: + cjs = js["changelog"]["histories"] + for citem in cjs: + if "items" in citem: + for item in citem["items"]: + if item["field"] == "status" and ( + item["toString"] == "Closed" or item["toString"] == "Resolved" + ): + return (True, citem["author"]) + else: + if "items" in js: + for item in js["items"]: + if item["field"] == "status" and item["toString"] == "Closed": + return (True, None) + return (False, None) + + +def resolved(js): + if "items" in js: + for item in js["items"]: + if item["field"] == "resolution" and ( + item["toString"] != "Pending Closed" + and item["toString"] != "Unresolved" + ): + return True + return False + + +def pchange(js): + if "items" in js: + for item in js["items"]: + if item["field"] == "priority": + return True + return False + + +def scanTicket(bug, KibbleBit, source, openTickets, u, dom): + try: + key = bug["id"] + dhash = hashlib.sha224( + ("%s-%s-%s" % (source["organisation"], source["sourceURL"], key)).encode( + "ascii", errors="replace" + ) + ).hexdigest() + found = KibbleBit.exists("issue", dhash) + parseIt = False + if not found: + parseIt = True + else: + ticket = KibbleBit.get("issue", dhash) + if ticket["status"] == "closed" and key in openTickets: + KibbleBit.pprint("Ticket was reopened, reparsing") + parseIt = True + elif ticket["status"] == "open" and not key in openTickets: + KibbleBit.pprint("Ticket was recently closed, parsing it") + parseIt = True + else: + pass + # print("Ticket hasn't changed, ignoring...") + + if parseIt: + KibbleBit.pprint("Parsing data from BugZilla for #%s" % key) + + params = {"ids": [int(key)], "limit": 0} + if (
+ source["creds"] + and "username" in source["creds"] + and source["creds"]["username"] + and len(source["creds"]["username"]) > 0 + ): + params["Bugzilla_login"] = source["creds"]["username"] + params["Bugzilla_password"] = source["creds"]["password"] + ticketsURL = "%s?method=Bug.get¶ms=[%s]" % ( + u, + urllib.parse.quote(json.dumps(params)), + ) + + js = jsonapi.get(ticketsURL) + js = js["result"]["bugs"][0] + creator = {"name": bug["creator"], "email": js["creator"]} + closer = {} + cd = getTime(js["creation_time"]) + rd = None + status = "open" + if js["status"] in ["CLOSED", "RESOLVED"]: + status = "closed" + KibbleBit.pprint("%s was closed, finding out who did that" % key) + ticketsURL = "%s?method=Bug.history¶ms=[%s]" % ( + u, + urllib.parse.quote(json.dumps(params)), + ) + hjs = jsonapi.get(ticketsURL) + history = hjs["result"]["bugs"][0]["history"] + for item in history: + for change in item["changes"]: + if ( + change["field_name"] == "status" + and "added" in change + and change["added"] in ["CLOSED", "RESOLVED"] + ): + rd = getTime(item["when"]) + closer = {"name": item["who"], "email": item["who"]} + break + KibbleBit.pprint("Counting comments for %s..." % key) + ticketsURL = "%s?method=Bug.comments¶ms=[%s]" % ( + u, + urllib.parse.quote(json.dumps(params)), + ) + hjs = jsonapi.get(ticketsURL) + comments = len(hjs["result"]["bugs"][str(key)]["comments"]) + + title = bug["summary"] + del params["ids"] + if closer: + + pid = hashlib.sha1( + ("%s%s" % (source["organisation"], closer["email"])).encode( + "ascii", errors="replace" + ) + ).hexdigest() + found = KibbleBit.exists("person", pid) + if not found: + params["names"] = [closer["email"]] + ticketsURL = "%s?method=User.get¶ms=[%s]" % ( + u, + urllib.parse.quote(json.dumps(params)), + ) + + try: + ujs = jsonapi.get(ticketsURL) + displayName = ujs["result"]["users"][0]["real_name"] + except: + displayName = closer["email"] + if displayName and len(displayName) > 0: + # Add to people db + + jsp = { + "name": displayName, + "email": closer["email"], + "organisation": source["organisation"], + "id": pid, + } + # print("Updating person DB for closer: %s (%s)" % (displayName, closerEmail)) + KibbleBit.index("person", pid, jsp) + + if creator: + pid = hashlib.sha1( + ("%s%s" % (source["organisation"], creator["email"])).encode( + "ascii", errors="replace" + ) + ).hexdigest() + found = KibbleBit.exists("person", pid) + if not found: + if not creator["name"]: + params["names"] = [creator["email"]] + ticketsURL = "%s?method=User.get¶ms=[%s]" % ( + u, + urllib.parse.quote(json.dumps(params)), + ) + try: + ujs = jsonapi.get(ticketsURL) + creator["name"] = ujs["result"]["users"][0]["real_name"] + except: + creator["name"] = creator["email"] + if creator["name"] and len(creator["name"]) > 0: + # Add to people db + + jsp = { + "name": creator["name"], + "email": creator["email"], + "organisation": source["organisation"], + "id": pid, + } + KibbleBit.index("person", pid, jsp) + + jso = { + "id": dhash, + "key": key, + "organisation": source["organisation"], + "sourceID": source["sourceID"], + "url": "%s/show_bug.cgi?id=%s" % (dom, key), + "status": status, + "created": cd, + "closed": rd, + "issuetype": "issue", + "issueCloser": closer["email"] if "email" in closer else None, + "createdDate": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(cd)), + "closedDate": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(rd)) + if rd + else None, + "changeDate": time.strftime( + "%Y/%m/%d %H:%M:%S", time.gmtime(rd if rd else cd) + ), + "assignee": None, + 
"issueCreator": creator["email"], + "comments": comments, + "title": title, + } + KibbleBit.append("issue", jso) + time.sleep(0.5) # BugZilla is notoriously slow. Maybe remove this later + return True + except Exception as err: + KibbleBit.pprint(err) + return False + + +class bzThread(Thread): + def __init__(self, KibbleBit, source, block, pt, ot, u, dom): + super(bzThread, self).__init__() + self.KibbleBit = KibbleBit + self.source = source + self.block = block + self.pendingTickets = pt + self.openTickets = ot + self.u = u + self.dom = dom + + def run(self): + badOnes = 0 + + while len(self.pendingTickets) > 0 and badOnes <= 50: + if len(self.pendingTickets) % 10 == 0: + self.KibbleBit.pprint( + "%u elements left to count" % len(self.pendingTickets) + ) + self.block.acquire() + try: + rl = self.pendingTickets.pop(0) + except Exception as err: # list empty, likely + self.block.release() + return + if not rl: + self.block.release() + return + self.block.release() + if not scanTicket( + rl, self.KibbleBit, self.source, self.openTickets, self.u, self.dom + ): + self.KibbleBit.pprint("Ticket %s seems broken, skipping" % rl["id"]) + badOnes += 1 + if badOnes > 50: + self.KibbleBit.pprint("Too many errors, bailing!") + self.source["steps"]["issues"] = { + "time": time.time(), + "status": "Too many errors while parsing at " + + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())), + "running": False, + "good": False, + } + self.KibbleBit.updateSource(self.source) + return + else: + badOnes = 0 + + +def scan(KibbleBit, source): + path = source["sourceID"] + url = source["sourceURL"] + + source["steps"]["issues"] = { + "time": time.time(), + "status": "Parsing BugZilla changes...", + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + + bz = re.match(r"(https?://\S+?)(/jsonrpc\.cgi)?[\s:?]+(.+)", url) + if bz: + if ( + source["creds"] + and "username" in source["creds"] + and source["creds"]["username"] + and len(source["creds"]["username"]) > 0 + ): + creds = "%s:%s" % (source["creds"]["username"], source["creds"]["password"]) + badOnes = 0 + pendingTickets = [] + openTickets = [] + + # Get base URL, list and domain to parse + dom = bz.group(1) + dom = re.sub(r"/+$", "", dom) + u = "%s/jsonrpc.cgi" % dom + instance = bz.group(3) + lastTicket = 0 + + params = { + "product": [instance], + "status": [ + "RESOLVED", + "CLOSED", + "NEW", + "UNCOMFIRMED", + "ASSIGNED", + "REOPENED", + "VERIFIED", + ], + "include_fields": ["id", "creation_time", "status", "summary", "creator"], + "limit": 10000, + "offset": 1, + } + # If * is requested, just omit the product name + if instance == "*": + params = { + "status": [ + "RESOLVED", + "CLOSED", + "NEW", + "UNCOMFIRMED", + "ASSIGNED", + "REOPENED", + "VERIFIED", + ], + "include_fields": [ + "id", + "creation_time", + "status", + "summary", + "creator", + ], + "limit": 10000, + "offset": 1, + } + + ticketsURL = "%s?method=Bug.search¶ms=[%s]" % ( + u, + urllib.parse.quote(json.dumps(params)), + ) + + while True: + try: + js = jsonapi.get(ticketsURL, auth=creds) + except: + KibbleBit.pprint("Couldn't fetch more tickets, bailing") + break + + if len(js["result"]["bugs"]) > 0: + KibbleBit.pprint( + "%s: Found %u tickets..." 
+            if len(js["result"]["bugs"]) > 0:
+                KibbleBit.pprint(
+                    "%s: Found %u tickets..."
+                    % (
+                        source["sourceURL"],
+                        ((params.get("offset", 1) - 1) + len(js["result"]["bugs"])),
+                    )
+                )
+                for bug in js["result"]["bugs"]:
+                    pendingTickets.append(bug)
+                    if not bug["status"] in ["RESOLVED", "CLOSED"]:
+                        openTickets.append(bug["id"])
+                params["offset"] += 10000
+                ticketsURL = "%s?method=Bug.search&params=[%s]" % (
+                    u,
+                    urllib.parse.quote(json.dumps(params)),
+                )
+            else:
+                KibbleBit.pprint("No more tickets left to scan")
+                break
+
+        KibbleBit.pprint(
+            "Found %u open tickets, %u closed."
+            % (len(openTickets), len(pendingTickets) - len(openTickets))
+        )
+
+        badOnes = 0
+        block = Lock()
+        threads = []
+        for i in range(0, 4):
+            t = bzThread(KibbleBit, source, block, pendingTickets, openTickets, u, dom)
+            threads.append(t)
+            t.start()
+
+        for t in threads:
+            t.join()
+
+        source["steps"]["issues"] = {
+            "time": time.time(),
+            "status": "Issue tracker (BugZilla) successfully scanned at "
+            + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())),
+            "running": False,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
diff --git a/kibble/scanners/scanners/buildbot.py b/kibble/scanners/scanners/buildbot.py
new file mode 100644
index 00000000..8a89e331
--- /dev/null
+++ b/kibble/scanners/scanners/buildbot.py
@@ -0,0 +1,281 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import time
+import datetime
+import re
+import hashlib
+import threading
+
+from kibble.scanners.utils import jsonapi
+
+"""
+This is the Kibble Buildbot scanner plugin.
+"""
+
+title = "Scanner for Buildbot"
+version = "0.1.0"
+
+
+def accepts(source):
+    """ Determines whether we want to handle this source """
+    if source["type"] == "buildbot":
+        return True
+    return False
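+
+
+# Plugin-contract sketch (illustrative): every scanner module in this package
+# exposes `title`, `version`, `accepts(source)` and `scan(KibbleBit, source)`.
+# A hypothetical Buildbot source document would look roughly like this:
+#
+#   source = {
+#       "type": "buildbot",
+#       "sourceURL": "https://ci.example.org",  # assumed CI host
+#       "sourceID": "abc123",
+#       "organisation": "demo",
+#       "creds": {"username": "ci", "password": "secret"},
+#       "steps": {},
+#   }
+#   if accepts(source):
+#       scan(kibble_bit, source)  # kibble_bit: the KibbleBit indexing helper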
+
+
+def scanJob(KibbleBit, source, job, creds):
+    """ Scans a single job for activity """
+    NOW = int(datetime.datetime.utcnow().timestamp())
+    dhash = hashlib.sha224(
+        ("%s-%s-%s" % (source["organisation"], source["sourceID"], job)).encode(
+            "ascii", errors="replace"
+        )
+    ).hexdigest()
+    found = True
+    doc = None
+    parseIt = False
+    found = KibbleBit.exists("cijob", dhash)
+
+    jobURL = "%s/json/builders/%s/builds/_all" % (source["sourceURL"], job)
+    KibbleBit.pprint(jobURL)
+    jobjson = jsonapi.get(jobURL, auth=creds)
+
+    # If valid JSON, ...
+    if jobjson:
+        for buildno, data in jobjson.items():
+            buildhash = hashlib.sha224(
+                (
+                    "%s-%s-%s-%s"
+                    % (source["organisation"], source["sourceID"], job, buildno)
+                ).encode("ascii", errors="replace")
+            ).hexdigest()
+            builddoc = None
+            try:
+                builddoc = KibbleBit.get("ci_build", buildhash)
+            except:
+                pass
+
+            # If this build already completed, no need to parse it again
+            if builddoc and builddoc.get("completed", False):
+                continue
+
+            KibbleBit.pprint(
+                "[%s-%s] This is new or pending, analyzing..." % (job, buildno)
+            )
+
+            completed = True if "currentStep" in data else False
+
+            # Get build status (success, failed, canceled etc)
+            status = "building"
+            if "successful" in data.get("text", []):
+                status = "success"
+            if "failed" in data.get("text", []):
+                status = "failed"
+            if "exception" in data.get("text", []):
+                status = "aborted"
+
+            DUR = 0
+            # Calc when the build finished
+            if completed and len(data.get("times", [])) == 2 and data["times"][1]:
+                FIN = data["times"][1]
+                DUR = FIN - data["times"][0]
+            else:
+                FIN = 0
+
+            doc = {
+                # Build specific data
+                "id": buildhash,
+                "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(FIN)),
+                "buildID": buildno,
+                "completed": completed,
+                "duration": DUR * 1000,  # Buildbot does seconds, not millis
+                "job": job,
+                "jobURL": "%s/builders/%s" % (source["sourceURL"], job),
+                "status": status,
+                "started": int(data["times"][0]),
+                "ci": "buildbot",
+                # Standard docs values
+                "sourceID": source["sourceID"],
+                "organisation": source["organisation"],
+                "upsert": True,
+            }
+            KibbleBit.append("ci_build", doc)
+        # Yay, it worked!
+        return True
+
+    # Boo, it failed!
+    KibbleBit.pprint("Fetching job data failed!")
+    return False
+
+
+class buildbotThread(threading.Thread):
+    """ Generic thread class for scheduling multiple scans at once """
+
+    def __init__(self, block, KibbleBit, source, creds, jobs):
+        super(buildbotThread, self).__init__()
+        self.block = block
+        self.KibbleBit = KibbleBit
+        self.creds = creds
+        self.source = source
+        self.jobs = jobs
+
+    def run(self):
+        badOnes = 0
+        while len(self.jobs) > 0 and badOnes <= 50:
+            self.block.acquire()
+            try:
+                job = self.jobs.pop(0)
+            except Exception as err:
+                self.block.release()
+                return
+            if not job:
+                self.block.release()
+                return
+            self.block.release()
+            if not scanJob(self.KibbleBit, self.source, job, self.creds):
+                self.KibbleBit.pprint("[%s] This borked, trying another one" % job)
+                badOnes += 1
+                if badOnes > 50:
+                    self.KibbleBit.pprint("Too many errors, bailing!")
+                    self.source["steps"]["ci"] = {
+                        "time": time.time(),
+                        "status": "Too many errors while parsing at "
+                        + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())),
+                        "running": False,
+                        "good": False,
+                    }
+                    self.KibbleBit.updateSource(self.source)
+                    return
+            else:
+                badOnes = 0
+
+
+def scan(KibbleBit, source):
+    # Simple URL check
+    buildbot = re.match(r"(https?://.+)", source["sourceURL"])
+    if buildbot:
+
+        source["steps"]["ci"] = {
+            "time": time.time(),
+            "status": "Parsing Buildbot job changes...",
+            "running": True,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
+
+        badOnes = 0
+        pendingJobs = []
+        KibbleBit.pprint("Parsing Buildbot activity at %s" % source["sourceURL"])
+        source["steps"]["ci"] = {
+            "time": time.time(),
+            "status": "Downloading changeset",
+            "running": True,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
+
+        # Buildbot may need credentials
+        creds = None
+        if (
+            source["creds"]
+            and "username" in source["creds"]
+            and source["creds"]["username"]
+            and len(source["creds"]["username"]) > 0
+        ):
+            creds = "%s:%s" % (source["creds"]["username"], source["creds"]["password"])
+
+        # Get the job list
+        sURL = source["sourceURL"]
+        KibbleBit.pprint("Getting job list...")
+        builders = jsonapi.get("%s/json/builders" % sURL, auth=creds)
+
+        # Save queue snapshot
+        NOW = int(datetime.datetime.utcnow().timestamp())
+        queuehash = hashlib.sha224(
+            (
+                "%s-%s-queue-%s"
+                % (source["organisation"], source["sourceID"], int(time.time()))
+            ).encode("ascii", errors="replace")
+        ).hexdigest()
+
+        # 
Scan queue items + blocked = 0 + stuck = 0 + totalqueuetime = 0 + labelQueuedBuilds = {} + queueSize = 0 + actualQueueSize = 0 + building = 0 + jobs = [] + + for builder, data in builders.items(): + jobs.append(builder) + if data["state"] == "building": + building += 1 + if data.get("pendingBuilds", 0) > 0: + # All queued items, even offlined builders + actualQueueSize += data.get("pendingBuilds", 0) + # Only queues with an online builder (actually waiting stuff) + if data["state"] == "building": + queueSize += data.get("pendingBuilds", 0) + blocked += data.get("pendingBuilds", 0) # Blocked by running builds + # Stuck builds (iow no builder available) + if data["state"] == "offline": + stuck += data.get("pendingBuilds", 0) + + # Write up a queue doc + queuedoc = { + "id": queuehash, + "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(NOW)), + "time": NOW, + "size": queueSize, + "blocked": blocked, + "stuck": stuck, + "building": building, + "ci": "buildbot", + # Standard docs values + "sourceID": source["sourceID"], + "organisation": source["organisation"], + "upsert": True, + } + KibbleBit.append("ci_queue", queuedoc) + + KibbleBit.pprint("Found %u builders in Buildbot" % len(jobs)) + + threads = [] + block = threading.Lock() + KibbleBit.pprint("Scanning jobs using 4 sub-threads") + for i in range(0, 4): + t = buildbotThread(block, KibbleBit, source, creds, jobs) + threads.append(t) + t.start() + + for t in threads: + t.join() + + # We're all done, yaay + KibbleBit.pprint("Done scanning %s" % source["sourceURL"]) + + source["steps"]["ci"] = { + "time": time.time(), + "status": "Buildbot successfully scanned at " + + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())), + "running": False, + "good": True, + } + KibbleBit.updateSource(source) diff --git a/kibble/scanners/scanners/discourse.py b/kibble/scanners/scanners/discourse.py new file mode 100644 index 00000000..8ed1159e --- /dev/null +++ b/kibble/scanners/scanners/discourse.py @@ -0,0 +1,345 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import time +import datetime +import re +import hashlib +import threading +import os + +from kibble.scanners.utils import jsonapi + +""" +This is the Kibble Discourse scanner plugin. 
+""" + +title = "Scanner for Discourse Forums" +version = "0.1.0" + + +def accepts(source): + """ Determines whether we want to handle this source """ + if source["type"] == "discourse": + return True + return False + + +def scanJob(KibbleBit, source, cat, creds): + """ Scans a single discourse category for activity """ + NOW = int(datetime.datetime.utcnow().timestamp()) + + # Get $discourseURL/c/$catID + + catURL = os.path.join(source["sourceURL"], "c/%s" % cat["id"]) + KibbleBit.pprint("Scanning Discourse category '%s' at %s" % (cat["slug"], catURL)) + + page = 0 + allUsers = {} + + # For each paginated result (up to page 100), check for changes + while page < 100: + pcatURL = "%s?page=%u" % (catURL, page) + catjson = jsonapi.get(pcatURL, auth=creds) + page += 1 + + if catjson: + + # If we hit an empty list (no more topics), just break the loop. + if not catjson["topic_list"]["topics"]: + break + + # First (if we have data), we should store the known users + # Since discourse hides the email (obviously!), we'll have to + # fake one to generate an account. + fakeDomain = "foo.discourse" + m = re.match(r"https?://([-a-zA-Z0-9.]+)", source["sourceURL"]) + if m: + fakeDomain = m.group(1) + for user in catjson["users"]: + # Fake email address, compute deterministic ID + email = "%s@%s" % (user["username"], fakeDomain) + dhash = hashlib.sha224( + ( + "%s-%s-%s" + % (source["organisation"], source["sourceURL"], email) + ).encode("ascii", errors="replace") + ).hexdigest() + + # Construct a very sparse user document + userDoc = { + "id": dhash, + "organisation": source["organisation"], + "name": user["username"], + "email": email, + } + + # Store user-ID-to-username mapping for later + allUsers[user["id"]] = userDoc + + # Store it (or, queue storage) unless it exists. + # We don't wanna override better data, so we check if + # it's there first. + if not KibbleBit.exists("person", dhash): + KibbleBit.append("person", userDoc) + + # Now, for each topic, we'll store a topic document + for topic in catjson["topic_list"]["topics"]: + + # Calculate topic ID + dhash = hashlib.sha224( + ( + "%s-%s-topic-%s" + % (source["organisation"], source["sourceURL"], topic["id"]) + ).encode("ascii", errors="replace") + ).hexdigest() + + # Figure out when topic was created and updated + CreatedDate = datetime.datetime.strptime( + topic["created_at"], "%Y-%m-%dT%H:%M:%S.%fZ" + ).timestamp() + if topic.get("last_posted_at"): + UpdatedDate = datetime.datetime.strptime( + topic["last_posted_at"], "%Y-%m-%dT%H:%M:%S.%fZ" + ).timestamp() + else: + UpdatedDate = 0 + + # Determine whether we should scan this topic or continue to the next one. + # We'll do this by seeing if the topic already exists and has no changes or not. 
+ if KibbleBit.exists("forum_topic", dhash): + fdoc = KibbleBit.get("forum_topic", dhash) + # If update in the old doc was >= current update timestamp, skip the topic + if fdoc["updated"] >= UpdatedDate: + continue + + # Assuming we need to scan this, start by making the base topic document + topicdoc = { + "id": dhash, + "sourceID": source["sourceID"], + "organisation": source["organisation"], + "type": "discourse", + "category": cat["slug"], + "title": topic["title"], + "creator": allUsers[topic["posters"][0]["user_id"]]["id"], + "creatorName": allUsers[topic["posters"][0]["user_id"]]["name"], + "created": CreatedDate, + "createdDate": time.strftime( + "%Y/%m/%d %H:%M:%S", time.gmtime(CreatedDate) + ), + "updated": UpdatedDate, + "solved": False, # Discourse doesn't have this notion, but other forums might. + "posts": topic["posts_count"], + "views": topic["views"], + "url": source["sourceURL"] + + "/t/%s/%s" % (topic["slug"], topic["id"]), + } + + KibbleBit.append("forum_topic", topicdoc) + KibbleBit.pprint("%s is new or changed, scanning" % topicdoc["url"]) + + # Now grab all the individual replies/posts + # Remember to not have it count as a visit! + pURL = "%s?track_visit=false&forceLoad=true" % topicdoc["url"] + pjson = jsonapi.get(pURL, auth=creds) + + posts = pjson["post_stream"]["posts"] + + # For each post/reply, construct a forum_entry document + KibbleBit.pprint("%s has %u posts" % (pURL, len(posts))) + for post in posts: + phash = hashlib.sha224( + ( + "%s-%s-post-%s" + % (source["organisation"], source["sourceURL"], post["id"]) + ).encode("ascii", errors="replace") + ).hexdigest() + uname = ( + post.get("name", post["username"]) or post["username"] + ) # Hack to get longest non-zero value + + # Find the hash of the person who posted it + # We may know them, or we may have to store them. + # If we have better info now (full name), re-store + if ( + post["user_id"] in allUsers + and allUsers[post["user_id"]]["name"] == uname + ): + uhash = allUsers[post["user_id"]]["id"] + else: + # Same as before, fake email, store... 
+                        email = "%s@%s" % (post["username"], fakeDomain)
+                        uhash = hashlib.sha224(
+                            (
+                                "%s-%s-%s"
+                                % (source["organisation"], source["sourceURL"], email)
+                            ).encode("ascii", errors="replace")
+                        ).hexdigest()
+
+                        # Construct a very sparse user document
+                        userDoc = {
+                            "id": uhash,
+                            "organisation": source["organisation"],
+                            "name": uname,
+                            "email": email,
+                        }
+
+                        # Store user-ID-to-username mapping for later
+                        # (keyed by this post's author, not the outer loop var)
+                        allUsers[post["user_id"]] = userDoc
+
+                        # Store it (or, queue storage)
+                        KibbleBit.append("person", userDoc)
+
+                    # Get post date
+                    CreatedDate = datetime.datetime.strptime(
+                        post["created_at"], "%Y-%m-%dT%H:%M:%S.%fZ"
+                    ).timestamp()
+
+                    # Store the post/reply document
+                    pdoc = {
+                        "id": phash,
+                        "sourceID": source["sourceID"],
+                        "organisation": source["organisation"],
+                        "type": "discourse",
+                        "creator": uhash,
+                        "created": CreatedDate,
+                        "createdDate": time.strftime(
+                            "%Y/%m/%d %H:%M:%S", time.gmtime(CreatedDate)
+                        ),
+                        "topic": dhash,
+                        "post_id": post["id"],
+                        "text": post["cooked"],
+                        "url": topicdoc["url"],
+                    }
+                    KibbleBit.append("forum_post", pdoc)
+        else:
+            KibbleBit.pprint("Fetching discourse data failed!")
+            return False
+    return True
+
+
+class discourseThread(threading.Thread):
+    """ Generic thread class for scheduling multiple scans at once """
+
+    def __init__(self, block, KibbleBit, source, creds, jobs):
+        super(discourseThread, self).__init__()
+        self.block = block
+        self.KibbleBit = KibbleBit
+        self.creds = creds
+        self.source = source
+        self.jobs = jobs
+
+    def run(self):
+        badOnes = 0
+        while len(self.jobs) > 0 and badOnes <= 50:
+            self.block.acquire()
+            try:
+                job = self.jobs.pop(0)
+            except Exception as err:
+                self.block.release()
+                return
+            if not job:
+                self.block.release()
+                return
+            self.block.release()
+            if not scanJob(self.KibbleBit, self.source, job, self.creds):
+                self.KibbleBit.pprint(
+                    "[%s] This borked, trying another one" % job["name"]
+                )
+                badOnes += 1
+                if badOnes > 10:
+                    self.KibbleBit.pprint("Too many errors, bailing!")
+                    self.source["steps"]["forum"] = {
+                        "time": time.time(),
+                        "status": "Too many errors while parsing at "
+                        + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())),
+                        "running": False,
+                        "good": False,
+                    }
+                    self.KibbleBit.updateSource(self.source)
+                    return
+            else:
+                badOnes = 0
+
+
+def scan(KibbleBit, source):
+    # Simple URL check
+    discourse = re.match(r"(https?://.+)", source["sourceURL"])
+    if discourse:
+
+        source["steps"]["forum"] = {
+            "time": time.time(),
+            "status": "Parsing Discourse topics...",
+            "running": True,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
+
+        badOnes = 0
+        pendingJobs = []
+        KibbleBit.pprint("Parsing Discourse activity at %s" % source["sourceURL"])
+        source["steps"]["forum"] = {
+            "time": time.time(),
+            "status": "Downloading changeset",
+            "running": True,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
+
+        # Discourse may need credentials (if basic auth)
+        creds = None
+        if (
+            source["creds"]
+            and "username" in source["creds"]
+            and source["creds"]["username"]
+            and len(source["creds"]["username"]) > 0
+        ):
+            creds = "%s:%s" % (source["creds"]["username"], source["creds"]["password"])
+
+        # Get the list of categories
+        sURL = source["sourceURL"]
+        KibbleBit.pprint("Getting categories...")
+        catjs = jsonapi.get("%s/categories_and_latest" % sURL, auth=creds)
+
+        # Directly assign the category list as pending jobs queue, ezpz.
+        pendingJobs = catjs["category_list"]["categories"]
+
+        KibbleBit.pprint("Found %u categories" % len(pendingJobs))
+
+        # Now fire off 4 threads to parse the categories
+        threads = []
+        block = threading.Lock()
+        KibbleBit.pprint("Scanning jobs using 4 sub-threads")
+        for i in range(0, 4):
+            t = discourseThread(block, KibbleBit, source, creds, pendingJobs)
+            threads.append(t)
+            t.start()
+
+        for t in threads:
+            t.join()
+
+        # We're all done, yaay
+        KibbleBit.pprint("Done scanning %s" % source["sourceURL"])
+
+        source["steps"]["forum"] = {
+            "time": time.time(),
+            "status": "Discourse successfully scanned at "
+            + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())),
+            "running": False,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
diff --git a/kibble/scanners/scanners/gerrit.py b/kibble/scanners/scanners/gerrit.py
new file mode 100644
index 00000000..70f0b90e
--- /dev/null
+++ b/kibble/scanners/scanners/gerrit.py
@@ -0,0 +1,258 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import re
+import requests
+import hashlib
+from dateutil import parser
+import time
+import json
+
+title = "Scanner for Gerrit Code Review"
+version = "0.1.1"
+
+
+CHANGES_URL = "%s/changes/%s"
+PROJECT_LIST_URL = "%s/projects/"
+ACCOUNTS_URL = "%s/accounts/%d"
+COMMIT_ID_RE = re.compile(" Change-Id: (.*)")
+
+
+def accepts(source):
+    """ Do we accept this source? """
""" + if source["type"] == "gerrit": + return True + return False + + +def getjson(response): + response.raise_for_status() + return json.loads(response.text[4:]) + + +def get(url, params=None): + resp = requests.get(url, params=params) + return getjson(resp) + + +def changes(base_url, params=None): + return get(CHANGES_URL % (base_url, ""), params=params) + + +def change_details(base_url, change): + if isinstance(change, dict): + id = change["change_id"] + else: + id = change + + return get(CHANGES_URL % (base_url, id) + "/detail") + + +def get_commit_id(commit_message): + all = COMMIT_ID_RE.findall(commit_message) + if all: + return all[0] + return None + + +def get_all(base_url, f, params={}): + acc = [] + + while True: + items = f(base_url, params=params) + if not items: + break + + acc.extend(items) + params.update({"start": len(acc)}) + + return acc + + +def format_date(d, epoch=False): + if not d: + return + parsed = parser.parse(d) + + if epoch: + return time.mktime(parsed.timetuple()) + + return time.strftime("%Y/%m/%d %H:%M:%S", parsed.timetuple()) + + +def make_hash(repo, change): + return hashlib.sha224( + ( + "%s-%s-%s" % (repo["organisation"], repo["sourceID"], change["change_id"]) + ).encode("ascii", errors="replace") + ).hexdigest() + + +def is_closed(change): + return change["status"] == "MERGED" or change["status"] == "ABANDONED" + + +def make_issue(repo, base_url, change): + key = change["change_id"] + dhash = make_hash(repo, change) + + closed_date = None + if is_closed(change): + closed_date = change["updated"] + + if not "email" in change["owner"]: + change["owner"]["email"] = "%u@invalid.gerrit" % change["owner"]["_account_id"] + owner_email = change["owner"]["email"] + + messages = [] + for message in change.get("messages", []): + messages.append(message.get("message", "")) + + return { + "id": dhash, + "key": key, + "organisation": repo["organisation"], + "sourceID": repo["sourceID"], + "url": base_url + "/#/q/" + key, + "status": change["status"], + "created": format_date(change["created"], epoch=True), + "closed": format_date(closed_date, epoch=True), + "issueCloser": owner_email, + "createdDate": format_date(change["created"]), + "closedDate": format_date(closed_date), + "changeDate": format_date(closed_date if closed_date else change["created"]), + "assignee": owner_email, + "issueCreator": owner_email, + "comments": len(messages), + "title": change["subject"], + } + + +def make_person(repo, raw_person): + email = raw_person["email"] + id = hashlib.sha1( + ("%s%s" % (repo["organisation"], email)).encode("ascii", errors="replace") + ).hexdigest() + return { + "email": email, + "id": id, + "organisation": repo["organisation"], + "name": raw_person["name"] + if "name" in raw_person + else "%u" % raw_person["_account_id"], + } + + +def update_issue(KibbleBit, issue): + id = issue["id"] + KibbleBit.pprint("Updating issue: " + id) + KibbleBit.index("issue", id, issue) + + +def update_person(KibbleBit, person): + KibbleBit.pprint("Updating person: " + person["name"] + " - " + person["email"]) + KibbleBit.index("person", person["id"], {"doc": person, "doc_as_upsert": True}) + + +def status_changed(stored_change, change): + if not stored_change or not change: + return True + return stored_change["status"] != change["status"] + + +def scan(KibbleBit, source): + source["steps"]["issues"] = { + "time": time.time(), + "status": "Analyzing Gerrit tickets...", + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + + url = source["sourceURL"] + # Try 
+
+
+def scan(KibbleBit, source):
+    source["steps"]["issues"] = {
+        "time": time.time(),
+        "status": "Analyzing Gerrit tickets...",
+        "running": True,
+        "good": True,
+    }
+    KibbleBit.updateSource(source)
+
+    url = source["sourceURL"]
+    # Try matching foo.bar/r/project/subfoo
+    m = re.match(r"(.+://.+?/r)/(.+)", url)
+    if m:
+        base_url = m.group(1)
+        project_name = m.group(2)
+    # Fall back to old splitty split
+    else:
+        url = re.sub(r"^git://", "http://", url)
+        source_parts = url.split("/")
+        project_name = source_parts[-1]
+        base_url = "/".join(source_parts[:-1])  # remove the trailing /blah/
+
+    # TODO: figure out branch from current checkout
+    q = (
+        '(is:open OR is:new OR is:closed OR is:merged OR is:abandoned) AND project:"%s"'
+        % project_name
+    )
+    all_changes = get_all(
+        base_url, changes, {"q": q, "o": ["LABELS", "DETAILED_ACCOUNTS"]}
+    )
+
+    print("Found " + str(len(all_changes)) + " changes for project: " + project_name)
+
+    people = {}
+    for change in all_changes:
+        try:
+            # TODO: check if needs updating here before getting details
+            dhash = make_hash(source, change)
+
+            stored_change = None
+            if KibbleBit.exists("issue", dhash):
+                stored_change = KibbleBit.get("issue", dhash)
+
+            if not status_changed(stored_change, change):
+                # print("change %s seen already and status unchanged. Skipping." %
+                # change['change_id'])
+                continue
+
+            details = change_details(base_url, change)
+
+            issue_doc = make_issue(source, base_url, details)
+            update_issue(KibbleBit, issue_doc)
+
+            labels = details["labels"]
+            change_people = []
+
+            if "owner" in details:
+                change_people.append(details["owner"])
+            if "Module-Owner" in labels and "all" in labels["Module-Owner"]:
+                change_people.extend(labels["Module-Owner"]["all"])
+            if "Code-Review" in labels and "all" in labels["Code-Review"]:
+                change_people.extend(labels["Code-Review"]["all"])
+            if "Verified" in labels and "all" in labels["Verified"]:
+                change_people.extend(labels["Verified"]["all"])
+
+            print(change["change_id"] + " -> " + str(len(change_people)) + " people.")
+
+            for person in change_people:
+                if "email" in person and person["email"] not in people:
+                    people[person["email"]] = person
+                    update_person(KibbleBit, make_person(source, person))
+
+        except requests.HTTPError as e:
+            print(e)
+
+    source["steps"]["issues"] = {
+        "time": time.time(),
+        "status": "Done analyzing tickets!",
+        "running": False,
+        "good": True,
+    }
+    KibbleBit.updateSource(source)
diff --git a/kibble/scanners/scanners/git-census.py b/kibble/scanners/scanners/git-census.py
new file mode 100644
index 00000000..70cbdbc2
--- /dev/null
+++ b/kibble/scanners/scanners/git-census.py
@@ -0,0 +1,328 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os
+import re
+import subprocess
+import time
+import tempfile
+import hashlib
+import email.utils
+import datetime
+
+title = "Census Scanner for Git"
+version = "0.1.0"
+
+
+def accepts(source):
+    """ Do we accept this source? """
""" + if source["type"] == "git": + return True + # There are cases where we have a github repo, but don't wanna annalyze the code, just issues + if source["type"] == "github" and source.get("issuesonly", False) == False: + return True + return False + + +def scan(KibbleBit, source): + """ Conduct a census scan """ + people = {} + idseries = {} + lcseries = {} + alcseries = {} + ctseries = {} + atseries = {} + + rid = source["sourceID"] + url = source["sourceURL"] + rootpath = "%s/%s/git" % ( + KibbleBit.config["scanner"]["scratchdir"], + source["organisation"], + ) + gpath = os.path.join(rootpath, rid) + + if "steps" in source and source["steps"]["sync"]["good"] and os.path.exists(gpath): + source["steps"]["census"] = { + "time": time.time(), + "status": "Census count started at " + + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + gname = rid + inp = "" + modificationDates = {} + # Did we do a census before? + if "census" in source and source["census"] > 0: + # Go back 2 months, meh... + ts = source["census"] - (62 * 86400) + pd = time.gmtime(ts) + date = time.strftime("%Y-%b-%d 0:00", pd) + inp = subprocess.check_output( + 'git --git-dir %s/.git log --after="%s" --all "--pretty=format:::%%H|%%ce|%%cn|%%ae|%%an|%%ct" --numstat' + % (gpath, date), + shell=True, + ) + else: + inp = subprocess.check_output( + 'git --git-dir %s/.git log --all "--pretty=format:::%%H|%%ce|%%cn|%%ae|%%an|%%ct" --numstat' + % gpath, + shell=True, + ) + tmp = tempfile.NamedTemporaryFile(mode="w+b", buffering=1, delete=False) + tmp.write(inp) + tmp.flush() + tmp.close() + with open(tmp.name, mode="r", encoding="utf-8", errors="replace") as f: + inp = f.read() + f.close() + os.unlink(tmp.name) + edone = 0 + KibbleBit.pprint("Parsing log for %s (%s)..." 
+        KibbleBit.pprint("Parsing log for %s (%s)..." % (rid, url))
+        for m in re.finditer(
+            u":([a-f0-9]+)\|([^\r\n|]+)\|([^\r\n|]+)\|([^\r\n|]+)\|([^\r\n|]+)\|([\d+]+)\r?\n([^:]+?):",
+            inp,
+            flags=re.MULTILINE,
+        ):
+            if m:
+                ch = m.group(1)
+                ce = m.group(2)
+                cn = m.group(3)
+                ae = m.group(4)
+                an = m.group(5)
+                ct = int(m.group(6))
+                diff = m.group(7)
+                insert = 0
+                delete = 0
+                files_touched = set()
+                # Diffs
+                for l in re.finditer(
+                    u"(\d+)[ \t]+(\d+)[ \t]+([^\r\n]+)", diff, flags=re.MULTILINE
+                ):
+                    insert += int(l.group(1))
+                    delete += int(l.group(2))
+                    filename = l.group(3)
+                    if filename:
+                        files_touched.update([filename])
+                    if (
+                        filename
+                        and len(filename) > 0
+                        and (
+                            not filename in modificationDates
+                            or modificationDates[filename]["timestamp"] < ct
+                        )
+                    ):
+                        modificationDates[filename] = {
+                            "hash": ch,
+                            "filename": filename,
+                            "timestamp": ct,
+                            "created": ct
+                            if (
+                                not filename in modificationDates
+                                or not "created" in modificationDates[filename]
+                                or modificationDates[filename]["created"] > ct
+                            )
+                            else modificationDates[filename]["created"],
+                            "author_email": ae,
+                            "committer_email": ce,
+                        }
+                if insert > 100000000:
+                    insert = 0
+                if delete > 100000000:
+                    delete = 0
+                if delete > 1000000 or insert > 1000000:
+                    KibbleBit.pprint(
+                        "gigantic diff for %s (%s), ignoring"
+                        % (gpath, source["sourceURL"])
+                    )
+                    pass
+                if not gname in idseries:
+                    idseries[gname] = {}
+                if not gname in lcseries:
+                    lcseries[gname] = {}
+                if not gname in alcseries:
+                    alcseries[gname] = {}
+                if not gname in ctseries:
+                    ctseries[gname] = {}
+                if not gname in atseries:
+                    atseries[gname] = {}
+                ts = ct - (ct % 86400)
+                if not ts in idseries[gname]:
+                    idseries[gname][ts] = [0, 0]
+
+                idseries[gname][ts][0] += insert
+                idseries[gname][ts][1] += delete
+
+                if not ts in lcseries[gname]:
+                    lcseries[gname][ts] = {}
+                if not ts in alcseries[gname]:
+                    alcseries[gname][ts] = {}
+                if not ce in lcseries[gname][ts]:
+                    lcseries[gname][ts][ce] = [0, 0]
+                lcseries[gname][ts][ce][0] = lcseries[gname][ts][ce][0] + insert
+                lcseries[gname][ts][ce][1] = lcseries[gname][ts][ce][1] + delete
+
+                if not ae in alcseries[gname][ts]:
+                    alcseries[gname][ts][ae] = [0, 0]
+                alcseries[gname][ts][ae][0] = alcseries[gname][ts][ae][0] + insert
+                alcseries[gname][ts][ae][1] = alcseries[gname][ts][ae][1] + delete
+
+                if not ts in ctseries[gname]:
+                    ctseries[gname][ts] = {}
+                if not ts in atseries[gname]:
+                    atseries[gname][ts] = {}
+
+                if not ce in ctseries[gname][ts]:
+                    ctseries[gname][ts][ce] = 0
+                ctseries[gname][ts][ce] += 1
+
+                if not ae in atseries[gname][ts]:
+                    atseries[gname][ts][ae] = 0
+                atseries[gname][ts][ae] += 1
+
+                # Committer
+                if not ce in people or len(people[ce]["name"]) < len(cn):
+                    people[ce] = people[ce] if ce in people else {"projects": [gname]}
+                    people[ce]["name"] = cn
+                    if not gname in people[ce]["projects"]:
+                        people[ce]["projects"].append(gname)
+
+                # Author
+                if not ae in people or len(people[ae]["name"]) < len(an):
+                    people[ae] = people[ae] if ae in people else {"projects": [gname]}
+                    people[ae]["name"] = an
+                    if not gname in people[ae]["projects"]:
+                        people[ae]["projects"].append(gname)
+
+                # Make a list of changed files, max 1024
+                filelist = list(files_touched)
+                filelist = filelist[:1023]
+
+                # ES commit documents
+                tsd = ts - (ts % 86400)
+                js = {
+                    "id": rid + "/" + ch,
+                    "sourceID": rid,
+                    "sourceURL": source["sourceURL"],
+                    "organisation": source["organisation"],
+                    "ts": ct,
+                    "tsday": tsd,
+                    "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(ct)),
+                    "committer_name": cn,
+                    "committer_email": ce,
+                    "author_name": an,
+                    "author_email": 
ae, + "insertions": insert, + "deletions": delete, + "vcs": "git", + "files_changed": filelist, + } + jsx = { + "id": ch, + "organisation": source["organisation"], + "sourceID": source[ + "sourceID" + ], # Only ever the last source with this + "ts": ct, + "tsday": tsd, + "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(ct)), + "committer_name": cn, + "committer_email": ce, + "author_name": an, + "author_email": ae, + "insertions": insert, + "deletions": delete, + "repository": rid, # This will always ever only be the last repo that had it! + "vcs": "git", + "files_changed": filelist, + } + KibbleBit.append( + "person", + { + "upsert": True, + "name": cn, + "email": ce, + "address": ce, + "organisation": source["organisation"], + "id": hashlib.sha1( + ("%s%s" % (source["organisation"], ce)).encode( + "ascii", errors="replace" + ) + ).hexdigest(), + }, + ) + KibbleBit.append( + "person", + { + "upsert": True, + "name": an, + "email": ae, + "address": ae, + "organisation": source["organisation"], + "id": hashlib.sha1( + ("%s%s" % (source["organisation"], ae)).encode( + "ascii", errors="replace" + ) + ).hexdigest(), + }, + ) + KibbleBit.append("code_commit", js) + KibbleBit.append("code_commit_unique", jsx) + + if True: # Do file changes?? Might wanna make this optional + KibbleBit.pprint("Scanning file changes for %s" % source["sourceURL"]) + for filename in modificationDates: + fid = hashlib.sha1( + ("%s/%s" % (source["sourceID"], filename)).encode( + "ascii", errors="replace" + ) + ).hexdigest() + jsfe = { + "upsert": True, + "id": fid, + "organisation": source["organisation"], + "sourceID": source["sourceID"], + "ts": modificationDates[filename]["timestamp"], + "date": time.strftime( + "%Y/%m/%d %H:%M:%S", + time.gmtime(modificationDates[filename]["timestamp"]), + ), + "committer_email": modificationDates[filename]["committer_email"], + "author_email": modificationDates[filename]["author_email"], + "hash": modificationDates[filename]["hash"], + "created": modificationDates[filename]["created"], + "createdDate": time.strftime( + "%Y/%m/%d %H:%M:%S", + time.gmtime(modificationDates[filename]["created"]), + ), + } + found = KibbleBit.exists("file_history", fid) + if found: + del jsfe["created"] + del jsfe["createdDate"] + KibbleBit.append("file_history", jsfe) + + source["steps"]["census"] = { + "time": time.time(), + "status": "Census count completed at " + + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), + "running": False, + "good": True, + } + source["census"] = time.time() + KibbleBit.updateSource(source) diff --git a/kibble/scanners/scanners/git-evolution.py b/kibble/scanners/scanners/git-evolution.py new file mode 100644 index 00000000..6ce2fe97 --- /dev/null +++ b/kibble/scanners/scanners/git-evolution.py @@ -0,0 +1,259 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+""" Git Evolution scanner """
+import os
+import subprocess
+import time
+import calendar
+import datetime
+import hashlib
+
+from kibble.scanners.utils import sloc
+
+title = "Git Evolution Scanner"
+version = "0.1.0"
+
+
+def accepts(source):
+    """ Do we accept this source? """
+    if source["type"] == "git":
+        return True
+    # There are cases where we have a github repo, but don't wanna analyze the code, just issues
+    if source["type"] == "github" and source.get("issuesonly", False) == False:
+        return True
+    return False
+
+
+def get_first_ref(gpath):
+    try:
+        return subprocess.check_output(
+            "cd %s && git log `git rev-list --max-parents=0 HEAD` --pretty=format:%%ct"
+            % gpath,
+            shell=True,
+        )
+    except:
+        print("Could not get first ref, exiting!")
+        return None
+
+
+def acquire(KibbleBit, source):
+    source["steps"]["evolution"] = {
+        "time": time.time(),
+        "status": "Evolution scan started at "
+        + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
+        "running": True,
+        "good": True,
+    }
+    KibbleBit.updateSource(source)
+
+
+def release(KibbleBit, source, status, exception=None, good=False):
+    source["steps"]["evolution"] = {
+        "time": time.time(),
+        "status": status,
+        "running": False,
+        "good": good,
+    }
+
+    if exception:
+        source["steps"]["evolution"].update({"exception": exception})
+    KibbleBit.updateSource(source)
+
+
+def check_branch(gpath, date, branch):
+    try:
+        subprocess.check_call(
+            'cd %s && git rev-list -n 1 --before="%s" %s' % (gpath, date, branch),
+            shell=True,
+        )
+        return True
+    except:
+        return False
+
+
+def checkout(gpath, date, branch):
+    # print("Ready to cloc...checking out %s " % date)
+    try:
+        ref = (
+            subprocess.check_output(
+                'cd %s && git rev-list -n 1 --before="%s" "%s"' % (gpath, date, branch),
+                shell=True,
+                stderr=subprocess.STDOUT,
+            )
+            .decode("ascii", "replace")
+            .strip()
+        )
+        subprocess.check_output(
+            "cd %s && git checkout %s -- " % (gpath, ref),
+            shell=True,
+            stderr=subprocess.STDOUT,
+        )
+    except subprocess.CalledProcessError as err:
+        print(err.output)
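+
+
+# Time-travel sketch (illustrative): checkout() above pins the working tree
+# to how it looked on a given date by resolving the last commit before that
+# date, e.g. with hypothetical values:
+#
+#   git rev-list -n 1 --before="2020-Jan-01 0:00" master   # -> some <ref>
+#   git checkout <ref> --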
no branch") + return None + + +def scan(KibbleBit, source): + + rid = source["sourceID"] + url = source["sourceURL"] + rootpath = "%s/%s/git" % ( + KibbleBit.config["scanner"]["scratchdir"], + source["organisation"], + ) + gpath = os.path.join(rootpath, rid) + + gname = source["sourceID"] + KibbleBit.pprint("Doing evolution scan of %s" % gname) + + inp = get_first_ref(gpath) + if inp: + ts = int(inp.split()[0]) + ts = ts - (ts % 86400) + date = time.strftime("%Y-%b-%d 0:00", time.gmtime(ts)) + + # print("Starting from %s" % date) + now = time.time() + + rid = source["sourceID"] + url = source["sourceURL"] + rootpath = "%s/%s/git" % ( + KibbleBit.config["scanner"]["scratchdir"], + source["organisation"], + ) + gpath = os.path.join(rootpath, rid) + + if source["steps"]["sync"]["good"] and os.path.exists(gpath): + acquire(KibbleBit, source) + branch = find_branch(date, gpath) + + if not branch: + release( + source, + "Could not do evolutionary scan of code", + "No default branch was found in this repository", + ) + return + + branch_exists = check_branch(gpath, date, branch) + + if not branch_exists: + KibbleBit.pprint("Not trunk either (bad repo?), skipping") + release( + source, + "Could not do evolutionary scan of code", + "No default branch was found in this repository", + ) + return + + try: + + d = time.gmtime(now) + year = d[0] + quarter = d[1] - (d[1] % 3) + if quarter <= 0: + quarter += 12 + year -= 1 + while now > ts: + pd = ( + datetime.datetime(year, quarter, 1) + .replace(tzinfo=datetime.timezone.utc) + .timetuple() + ) + date = time.strftime("%Y-%b-%d 0:00", pd) + unix = calendar.timegm(pd) + + # Skip the dates we've already processed + dhash = hashlib.sha224( + (source["sourceID"] + date).encode("ascii", "replace") + ).hexdigest() + found = KibbleBit.exists("evolution", dhash) + if not found: + checkout(gpath, date, branch) + KibbleBit.pprint( + "Running cloc on %s (%s) at %s" + % (gname, source["sourceURL"], date) + ) + languages, codecount, comment, blank, years, cost = sloc.count( + gpath + ) + js = { + "time": unix, + "sourceID": source["sourceID"], + "sourceURL": source["sourceURL"], + "organisation": source["organisation"], + "loc": codecount, + "comments": comment, + "blank": blank, + "years": years, + "cost": cost, + "languages": languages, + } + KibbleBit.index("evolution", dhash, js) + quarter -= 3 + if quarter <= 0: + quarter += 12 + year -= 1 + + # decrease month by 3 + now = time.mktime(datetime.date(year, quarter, 1).timetuple()) + except Exception as e: + KibbleBit.pprint(e) + release( + KibbleBit, + source, + "Evolution scan failed at " + + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), + str(e), + ) + return + + release( + KibbleBit, + source, + "Evolution scan completed at " + + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), + good=True, + ) diff --git a/kibble/scanners/scanners/git-sloc.py b/kibble/scanners/scanners/git-sloc.py new file mode 100644 index 00000000..f73cf860 --- /dev/null +++ b/kibble/scanners/scanners/git-sloc.py @@ -0,0 +1,87 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os
+import subprocess
+import time
+
+from kibble.scanners.utils import sloc, git
+
+""" Source Lines of Code counter for Git """
+
+title = "SloC Counter for Git"
+version = "0.1.0"
+
+
+def accepts(source):
+    """ Do we accept this source? """
+    if source["type"] == "git":
+        return True
+    # There are cases where we have a github repo, but don't wanna analyze the code, just issues
+    if source["type"] == "github" and source.get("issuesonly", False) == False:
+        return True
+    return False
+
+
+def scan(KibbleBit, source):
+
+    rid = source["sourceID"]
+    url = source["sourceURL"]
+    rootpath = "%s/%s/git" % (
+        KibbleBit.config["scanner"]["scratchdir"],
+        source["organisation"],
+    )
+    gpath = os.path.join(rootpath, rid)
+
+    if source["steps"]["sync"]["good"] and os.path.exists(gpath):
+        source["steps"]["count"] = {
+            "time": time.time(),
+            "status": "SLoC count started at "
+            + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
+            "running": True,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
+
+        try:
+            branch = git.defaultBranch(source, gpath)
+            subprocess.call("cd %s && git checkout %s" % (gpath, branch), shell=True)
+        except:
+            KibbleBit.pprint("SLoC counter failed to find main branch for %s!!" % url)
+            return False
+
+        KibbleBit.pprint("Running SLoC count for %s" % url)
+        languages, codecount, comment, blank, years, cost = sloc.count(gpath)
+
+        sloc_ = {
+            "sourceID": source["sourceID"],
+            "loc": codecount,
+            "comments": comment,
+            "blanks": blank,
+            "years": years,
+            "cost": cost,
+            "languages": languages,
+        }
+        source["sloc"] = sloc_
+        source["steps"]["count"] = {
+            "time": time.time(),
+            "status": "SLoC count completed at "
+            + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
+            "running": False,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
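+
+
+# Usage sketch for the sloc helper (return shape inferred from the call
+# above; illustrative only):
+#
+#   languages, loc, comments, blanks, years, cost = sloc.count("/tmp/checkout")
+#   print("%u lines of code in %u languages" % (loc, len(languages)))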
""" + if source["type"] == "git": + return True + # There are cases where we have a github repo, but don't wanna annalyze the code, just issues + if source["type"] == "github" and source.get("issuesonly", False) == False: + return True + return False + + +def scan(KibbleBit, source): + + # Get some vars, construct a data path for the repo + path = source["sourceID"] + url = source["sourceURL"] + rootpath = "%s/%s/git" % ( + KibbleBit.config["scanner"]["scratchdir"], + source["organisation"], + ) + + # If the root path does not exist, try to make it recursively. + if not os.path.exists(rootpath): + try: + os.makedirs(rootpath, exist_ok=True) + print("Created root path %s" % rootpath) + except Exception as err: + source["steps"]["sync"] = { + "time": time.time(), + "status": "Could not create root scratch dir - permision denied?", + "running": False, + "good": False, + } + KibbleBit.updateSource(source) + return + + # This is were the repo should be cloned + datapath = os.path.join(rootpath, path) + + KibbleBit.pprint("Checking out %s as %s" % (url, path)) + + try: + source["steps"]["sync"] = { + "time": time.time(), + "status": "Fetching code data from source location...", + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + + # If we already checked this out earlier, just sync it. + if os.path.exists(datapath): + KibbleBit.pprint("Repo %s exists, fetching changes..." % datapath) + + # Do we have a default branch here? + branch = git.defaultBranch(source, datapath, KibbleBit) + if len(branch) == 0: + source["default_branch"] = branch + source["steps"]["sync"] = { + "time": time.time(), + "status": "Could not sync with source", + "exception": "No default branch was found in this repository", + "running": False, + "good": False, + } + KibbleBit.updateSource(source) + KibbleBit.pprint( + "No default branch found for %s (%s)" + % (source["sourceID"], source["sourceURL"]) + ) + return + + KibbleBit.pprint("Using branch %s" % branch) + # Try twice checking out the main branch and fetching changes. + # Sometimes we need to clean up after older scanners, which is + # why we try twice. If first attempt fails, clean up and try again. + for n in range(0, 2): + try: + subprocess.check_output( + "GIT_TERMINAL_PROMPT=0 cd %s && git checkout %s && git fetch --all && git merge -X theirs --no-edit" + % (datapath, branch), + shell=True, + stderr=subprocess.STDOUT, + ) + break + except subprocess.CalledProcessError as err: + e = str(err.output).lower() + # We're interested in merge conflicts, which we can resolve through trickery. + if n > 0 or not ( + "resolve" in e or "merge" in e or "overwritten" in e + ): + # This isn't a merge conflict, pass it to outer func + raise err + else: + # Switch to first commit + fcommit = subprocess.check_output( + "cd %s && git rev-list --max-parents=0 --abbrev-commit HEAD" + % datapath, + shell=True, + stderr=subprocess.STDOUT, + ) + fcommit = fcommit.decode("ascii").strip() + subprocess.check_call( + "cd %s && git reset --hard %s" % (datapath, fcommit), + shell=True, + stderr=subprocess.STDOUT, + ) + try: + subprocess.check_call( + "cd %s && git clean -xfd" % datpath, + shell=True, + stderr=subprocess.STDOUT, + ) + except: + pass + # This is a new repo, clone it! + else: + KibbleBit.pprint("%s is new, cloning...!" 
+            subprocess.check_output(
+                "GIT_TERMINAL_PROMPT=0 cd %s && git clone %s %s"
+                % (rootpath, url, path),
+                shell=True,
+                stderr=subprocess.STDOUT,
+            )
+
+    except subprocess.CalledProcessError as err:
+        KibbleBit.pprint("Repository sync failed (no master?)")
+        KibbleBit.pprint(str(err.output))
+        source["steps"]["sync"] = {
+            "time": time.time(),
+            "status": "Sync failed at "
+            + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
+            "running": False,
+            "good": False,
+            "exception": str(err.output),
+        }
+        KibbleBit.updateSource(source)
+        return
+
+    # All good, yay!
+    source["steps"]["sync"] = {
+        "time": time.time(),
+        "status": "Source code fetched successfully at "
+        + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
+        "running": False,
+        "good": True,
+    }
+    KibbleBit.updateSource(source)
diff --git a/kibble/scanners/scanners/github-issues.py b/kibble/scanners/scanners/github-issues.py
new file mode 100644
index 00000000..1b5f0bbb
--- /dev/null
+++ b/kibble/scanners/scanners/github-issues.py
@@ -0,0 +1,245 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import re
+import hashlib
+from dateutil import parser
+import time
+import requests
+
+from kibble.scanners.utils import github
+
+title = "Scanner for GitHub Issues"
+version = "0.1.0"
+
+
+def accepts(source):
+    """ Return true if this is a github repo """
+    if source["type"] == "github":
+        return True
+    if source["type"] == "git" and re.match(
+        r"https://(?:www\.)?github.com/", source["sourceURL"]
+    ):
+        return True
+    return False
+
+
+def format_date(d, epoch=False):
+    if not d:
+        return
+    parsed = parser.parse(d)
+
+    if epoch:
+        return time.mktime(parsed.timetuple())
+
+    return time.strftime("%Y/%m/%d %H:%M:%S", parsed.timetuple())
+
+
+def make_hash(source, issue):
+    return hashlib.sha224(
+        (
+            "%s-%s-%s" % (source["organisation"], source["sourceID"], str(issue["id"]))
+        ).encode("ascii", errors="replace")
+    ).hexdigest()
+
+
+def make_issue(source, issue, people):
+
+    key = str(issue["number"])
+    dhash = make_hash(source, issue)
+
+    closed_date = issue.get("closed_at", None)
+
+    owner_email = people[issue["user"]["login"]]["email"]
+
+    issue_closer = owner_email
+    if "closed_by" in issue:
+        issue_closer = people[issue["closed_by"]["login"]]["email"]
+    # Is this an issue or a pull request?
+    itype = "issue"
+    if "pull_request" in issue:
+        itype = "pullrequest"
+    labels = []
+    for l in issue.get("labels", []):
+        labels.append(l["name"])
+    return {
+        "id": dhash,
+        "key": key,
+        "issuetype": itype,
+        "organisation": source["organisation"],
+        "sourceID": source["sourceID"],
+        "url": issue["html_url"],
+        "status": issue["state"],
+        "labels": labels,
+        "created": format_date(issue["created_at"], epoch=True),
+        "closed": format_date(closed_date, epoch=True),
+        "issueCloser": issue_closer,
+        "createdDate": format_date(issue["created_at"]),
+        "closedDate": format_date(closed_date),
+        "changeDate": format_date(closed_date if closed_date else issue["updated_at"]),
+        "assignee": owner_email,
+        "issueCreator": owner_email,
+        "comments": issue["comments"],
+        "title": issue["title"],
+    }
+
+
+def make_person(source, issue, raw_person):
+    email = raw_person["email"]
+    if not email:
+        email = "%s@invalid.github.com" % raw_person["login"]
+
+    name = raw_person["name"]
+    if not name:
+        name = raw_person["login"]
+
+    id = hashlib.sha1(
+        ("%s%s" % (source["organisation"], email)).encode("ascii", errors="replace")
+    ).hexdigest()
+
+    return {
+        "email": email,
+        "id": id,
+        "organisation": source["organisation"],
+        "name": name,
+    }
+
+
+def status_changed(stored_issue, issue):
+    return stored_issue["status"] != issue["status"]
+
+
+def update_issue(KibbleBit, issue):
+    KibbleBit.append("issue", issue)
+
+
+def update_person(KibbleBit, person):
+    person["upsert"] = True
+    KibbleBit.append("person", person)
+
+
+def scan(KibbleBit, source, firstAttempt=True):
+    auth = None
+    people = {}
+    if "creds" in source:
+        KibbleBit.pprint("Using auth for repo %s" % source["sourceURL"])
+        creds = source["creds"]
+        if creds and "username" in creds:
+            auth = (creds["username"], creds["password"])
+    TL = github.get_tokens_left(auth=auth)
+    KibbleBit.pprint("Scanning for GitHub issues (%u tokens left on GitHub)" % TL)
+    # Have we scanned before? If so, only do a 3 month scan here.
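+    # ("3 months" is approximated below as 3 * 30 * 86400 seconds, i.e. 90
+    # days, and passed to the GitHub API as an ISO-8601 `since` timestamp.)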
+ doneBefore = False + if source.get("steps") and source["steps"].get("issues"): + doneBefore = True + source["steps"]["issues"] = { + "time": time.time(), + "status": "Issue scan started at " + + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + try: + if doneBefore: + since = time.strftime( + "%Y-%m-%dT%H:%M:%SZ", time.gmtime(time.time() - (3 * 30 * 86400)) + ) + KibbleBit.pprint("Fetching changes since %s" % since) + issues = github.get_all( + source, + github.issues, + params={"filter": "all", "state": "all", "since": since}, + auth=auth, + ) + else: + issues = github.get_all( + source, + github.issues, + params={"filter": "all", "state": "all"}, + auth=auth, + ) + KibbleBit.pprint( + "Fetched %s issues for %s" % (str(len(issues)), source["sourceURL"]) + ) + + for issue in issues: + + if not issue["user"]["login"] in people: + person = make_person( + source, issue, github.user(issue["user"]["url"], auth=auth) + ) + people[issue["user"]["login"]] = person + update_person(KibbleBit, person) + + if "closed_by" in issue and not issue["closed_by"]["login"] in people: + closer = make_person( + source, issue, github.user(issue["closed_by"]["url"], auth=auth) + ) + people[issue["closed_by"]["login"]] = closer + update_person(KibbleBit, closer) + + doc = make_issue(source, issue, people) + dhash = doc["id"] + stored_change = None + if KibbleBit.exists("issue", dhash): + es_doc = KibbleBit.get("issue", dhash) + if not status_changed(es_doc, doc): + # KibbleBit.pprint("change %s seen already and status unchanged. Skipping." % issue['id']) + continue + + update_issue(KibbleBit, doc) + + source["steps"]["issues"] = { + "time": time.time(), + "status": "Issue scan completed at " + + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), + "running": False, + "good": True, + } + KibbleBit.updateSource(source) + + except requests.HTTPError as e: + # If we errored out because of rate limiting, retry later, otherwise bail + if firstAttempt: + sleeps = 0 + if github.get_tokens_left(auth=auth) < 10: + KibbleBit.pprint("Hit rate limits, trying to sleep it off!") + while github.get_tokens_left(auth=auth) < 10: + sleeps += 1 + if sleeps > 24: + KibbleBit.pprint( + "Slept for too long without finding a reset rate limit, giving up!" + ) + break + time.sleep(300) # Sleep 5 min, then check again.. + # If we have tokens, try one more time... + if github.get_tokens_left(auth=auth) > 10: + scan(KibbleBit, source, False) # If this one fails, bail completely + return + + KibbleBit.pprint("HTTP Error, rate limit exceeded?") + source["steps"]["issues"] = { + "time": time.time(), + "status": "Issue scan failed at " + + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) + + ": " + + e.response.text, + "running": False, + "good": False, + } + KibbleBit.updateSource(source) diff --git a/kibble/scanners/scanners/github-stats.py b/kibble/scanners/scanners/github-stats.py new file mode 100644 index 00000000..5f993dc4 --- /dev/null +++ b/kibble/scanners/scanners/github-stats.py @@ -0,0 +1,139 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import hashlib
+import re
+import time
+
+from kibble.scanners.utils import github
+
+title = "Traffic statistics plugin for GitHub repositories"
+version = "0.1.0"
+
+
+def accepts(source):
+    """ Do we accept this source? """
+    if source["type"] == "github":
+        return True
+    return False
+
+
+def getTime(string):
+    """ Convert GitHub timestamp to epoch """
+    return time.mktime(
+        time.strptime(re.sub(r"Z", "", str(string)), "%Y-%m-%dT%H:%M:%S")
+    )
+
+
+def scan(KibbleBit, source):
+
+    # Get some vars, construct a data path for the repo
+    path = source["sourceID"]
+    url = source["sourceURL"]
+
+    auth = None
+    people = {}
+    if "creds" in source:
+        KibbleBit.pprint("Using auth for repo %s" % source["sourceURL"])
+        creds = source["creds"]
+        if creds and "username" in creds:
+            auth = (creds["username"], creds["password"])
+    else:
+        KibbleBit.pprint(
+            "GitHub stats requires auth, none provided. Ignoring this repo."
+        )
+        return
+    try:
+        source["steps"]["stats"] = {
+            "time": time.time(),
+            "status": "Fetching statistics from source location...",
+            "running": True,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
+
+        # Get views
+        views = github.views(url, auth)
+        if "views" in views:
+            for el in views["views"]:
+                ts = getTime(el["timestamp"])
+                shash = hashlib.sha224(
+                    (
+                        "%s-%s-%s-views"
+                        % (source["organisation"], url, el["timestamp"])
+                    ).encode("ascii", errors="replace")
+                ).hexdigest()
+                bit = {
+                    "organisation": source["organisation"],
+                    "sourceURL": source["sourceURL"],
+                    "sourceID": source["sourceID"],
+                    "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(ts)),
+                    "count": el["count"],
+                    "uniques": el["uniques"],
+                    "ghtype": "views",
+                    "id": shash,
+                }
+                KibbleBit.append("ghstats", bit)
+
+        # Get clones
+        clones = github.clones(url, auth)
+        if "clones" in clones:
+            for el in clones["clones"]:
+                ts = getTime(el["timestamp"])
+                shash = hashlib.sha224(
+                    (
+                        "%s-%s-%s-clones"
+                        % (source["organisation"], url, el["timestamp"])
+                    ).encode("ascii", errors="replace")
+                ).hexdigest()
+                bit = {
+                    "organisation": source["organisation"],
+                    "sourceURL": source["sourceURL"],
+                    "sourceID": source["sourceID"],
+                    "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(ts)),
+                    "count": el["count"],
+                    "uniques": el["uniques"],
+                    "ghtype": "clones",
+                    "id": shash,
+                }
+                KibbleBit.append("ghstats", bit)
+
+        # Get referrers
+        refs = github.referrers(url, auth)
+        if refs:
+            for el in refs:
+                el["timestamp"] = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
+                ts = getTime(el["timestamp"])
+                shash = hashlib.sha224(
+                    (
+                        "%s-%s-%s-refs" % (source["organisation"], url, el["timestamp"])
+                    ).encode("ascii", errors="replace")
+                ).hexdigest()
+                bit = {
+                    "organisation": source["organisation"],
+                    "sourceURL": source["sourceURL"],
+                    "sourceID": source["sourceID"],
+                    "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(ts)),
+                    "count": el["count"],
+                    "uniques": el["uniques"],
+                    "ghtype": "referrers",
+                    "id": shash,
+                }
+                KibbleBit.append("ghstats", bit)
+    except:
+        pass
+    # All done!
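
Note: the traffic scanner above derives each document id deterministically, as
a SHA-224 digest of organisation, repo URL, timestamp and a type suffix, so
re-running a scan upserts the same documents instead of duplicating them. A
minimal sketch of the idea (stats_doc_id is an illustrative name, not a
function in this patch):

    import hashlib

    def stats_doc_id(organisation, url, timestamp, ghtype):
        # Identical inputs always yield the same id, making writes idempotent.
        key = "%s-%s-%s-%s" % (organisation, url, timestamp, ghtype)
        return hashlib.sha224(key.encode("ascii", errors="replace")).hexdigest()

    # Re-scanning the same day produces the identical id, so the index upserts:
    a = stats_doc_id("apache", "https://github.com/apache/kibble", "2020-10-12T00:00:00Z", "views")
    b = stats_doc_id("apache", "https://github.com/apache/kibble", "2020-10-12T00:00:00Z", "views")
    assert a == b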
diff --git a/kibble/scanners/scanners/jenkins.py b/kibble/scanners/scanners/jenkins.py new file mode 100644 index 00000000..e9833de0 --- /dev/null +++ b/kibble/scanners/scanners/jenkins.py @@ -0,0 +1,356 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import time +import datetime +import re +import hashlib +import threading +import urllib.parse + +from kibble.scanners.utils import jsonapi + +""" +This is the Kibble Jenkins scanner plugin. +""" + +title = "Scanner for Jenkins CI" +version = "0.1.0" + + +def accepts(source): + """ Determines whether we want to handle this source """ + if source["type"] == "jenkins": + return True + return False + + +def scanJob(KibbleBit, source, job, creds): + """ Scans a single job for activity """ + NOW = int(datetime.datetime.utcnow().timestamp()) + jname = job["name"] + if job.get("folder"): + jname = job.get("folder") + "-" + job["name"] + dhash = hashlib.sha224( + ("%s-%s-%s" % (source["organisation"], source["sourceURL"], jname)).encode( + "ascii", errors="replace" + ) + ).hexdigest() + found = True + doc = None + parseIt = False + found = KibbleBit.exists("cijob", dhash) + + # Get $jenkins/job/$job-name/json... + jobURL = ( + "%s/api/json?depth=2&tree=builds[number,status,timestamp,id,result,duration]" + % job["fullURL"] + ) + KibbleBit.pprint(jobURL) + jobjson = jsonapi.get(jobURL, auth=creds) + + # If valid JSON, ... + if jobjson: + for build in jobjson.get("builds", []): + buildhash = hashlib.sha224( + ( + "%s-%s-%s-%s" + % (source["organisation"], source["sourceURL"], jname, build["id"]) + ).encode("ascii", errors="replace") + ).hexdigest() + builddoc = None + try: + builddoc = KibbleBit.get("ci_build", buildhash) + except: + pass + + # If this build already completed, no need to parse it again + if builddoc and builddoc.get("completed", False): + continue + + KibbleBit.pprint( + "[%s-%s] This is new or pending, analyzing..." 
% (jname, build["id"]) + ) + + completed = True if build["result"] else False + + # Estimate time spent in queue + queuetime = 0 + TS = int(build["timestamp"] / 1000) + if builddoc: + queuetime = builddoc.get("queuetime", 0) + if not completed: + queuetime = NOW - TS + + # Get build status (success, failed, canceled etc) + status = "building" + if build["result"] in ["SUCCESS", "STABLE"]: + status = "success" + if build["result"] in ["FAILURE", "UNSTABLE"]: + status = "failed" + if build["result"] in ["ABORTED"]: + status = "aborted" + + # Calc when the build finished (jenkins doesn't show this) + if completed: + FIN = int(build["timestamp"] + build["duration"]) / 1000 + else: + FIN = 0 + + doc = { + # Build specific data + "id": buildhash, + "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(FIN)), + "buildID": build["id"], + "completed": completed, + "duration": build["duration"], + "job": jname, + "jobURL": jobURL, + "status": status, + "started": int(build["timestamp"] / 1000), + "ci": "jenkins", + "queuetime": queuetime, + # Standard docs values + "sourceID": source["sourceID"], + "organisation": source["organisation"], + "upsert": True, + } + KibbleBit.append("ci_build", doc) + # Yay, it worked! + return True + + # Boo, it failed! + KibbleBit.pprint("Fetching job data failed!") + return False + + +class jenkinsThread(threading.Thread): + """ Generic thread class for scheduling multiple scans at once """ + + def __init__(self, block, KibbleBit, source, creds, jobs): + super(jenkinsThread, self).__init__() + self.block = block + self.KibbleBit = KibbleBit + self.creds = creds + self.source = source + self.jobs = jobs + + def run(self): + badOnes = 0 + while len(self.jobs) > 0 and badOnes <= 50: + self.block.acquire() + try: + job = self.jobs.pop(0) + except Exception as err: + self.block.release() + return + if not job: + self.block.release() + return + self.block.release() + jfolder = job.get("folder") + ssource = dict(self.source) + if jfolder: + ssource["sourceURL"] += "/job/" + jfolder + if not scanJob(self.KibbleBit, ssource, job, self.creds): + self.KibbleBit.pprint( + "[%s] This borked, trying another one" % job["name"] + ) + badOnes += 1 + if badOnes > 100: + self.KibbleBit.pprint("Too many errors, bailing!") + self.source["steps"]["issues"] = { + "time": time.time(), + "status": "Too many errors while parsing at " + + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())), + "running": False, + "good": False, + } + self.KibbleBit.updateSource(self.source) + return + else: + badOnes = 0 + + +def scan(KibbleBit, source): + # Simple URL check + jenkins = re.match(r"(https?://.+)", source["sourceURL"]) + if jenkins: + + source["steps"]["jenkins"] = { + "time": time.time(), + "status": "Parsing Jenkins job changes...", + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + + badOnes = 0 + pendingJobs = [] + KibbleBit.pprint("Parsing Jenkins activity at %s" % source["sourceURL"]) + source["steps"]["issues"] = { + "time": time.time(), + "status": "Downloading changeset", + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + + # Jenkins may neeed credentials + creds = None + if ( + source["creds"] + and "username" in source["creds"] + and source["creds"]["username"] + and len(source["creds"]["username"]) > 0 + ): + creds = "%s:%s" % (source["creds"]["username"], source["creds"]["password"]) + + # Get the job list + sURL = source["sourceURL"] + KibbleBit.pprint("Getting job list...") + jobsjs = jsonapi.get( + 
"%s/api/json?tree=jobs[name,color]&depth=1" % sURL, auth=creds + ) + + # Get the current queue + KibbleBit.pprint("Getting job queue...") + queuejs = jsonapi.get("%s/queue/api/json?depth=1" % sURL, auth=creds) + + # Save queue snapshot + NOW = int(datetime.datetime.utcnow().timestamp()) + queuehash = hashlib.sha224( + ( + "%s-%s-queue-%s" + % (source["organisation"], source["sourceURL"], int(time.time())) + ).encode("ascii", errors="replace") + ).hexdigest() + + # Scan queue items + blocked = 0 + stuck = 0 + totalqueuetime = 0 + items = queuejs.get("items", []) + + for item in items: + if item["blocked"]: + blocked += 1 + if item["stuck"]: + stuck += 1 + if "inQueueSince" in item: + totalqueuetime += NOW - int(item["inQueueSince"] / 1000) + + avgqueuetime = totalqueuetime / max(1, len(items)) + + # Count how many jobs are building, find any folders... + actual_jobs, building = get_all_jobs( + KibbleBit, source, jobsjs.get("jobs", []), creds + ) + + # Write up a queue doc + queuedoc = { + "id": queuehash, + "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(NOW)), + "time": NOW, + "building": building, + "size": len(items), + "blocked": blocked, + "stuck": stuck, + "avgwait": avgqueuetime, + "ci": "jenkins", + # Standard docs values + "sourceID": source["sourceID"], + "organisation": source["organisation"], + "upsert": True, + } + KibbleBit.append("ci_queue", queuedoc) + + pendingJobs = actual_jobs + KibbleBit.pprint("Found %u jobs in Jenkins" % len(pendingJobs)) + + threads = [] + block = threading.Lock() + KibbleBit.pprint("Scanning jobs using 4 sub-threads") + for i in range(0, 4): + t = jenkinsThread(block, KibbleBit, source, creds, pendingJobs) + threads.append(t) + t.start() + + for t in threads: + t.join() + + # We're all done, yaay + KibbleBit.pprint("Done scanning %s" % source["sourceURL"]) + + source["steps"]["issues"] = { + "time": time.time(), + "status": "Jenkins successfully scanned at " + + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())), + "running": False, + "good": True, + } + KibbleBit.updateSource(source) + + +def get_all_jobs(KibbleBit, source, joblist, creds): + real_jobs = [] + building = 0 + for job in joblist: + # Is this a job folder? + jclass = job.get("_class") + if jclass in [ + "jenkins.branch.OrganizationFolder", + "org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject", + ]: + KibbleBit.pprint("%s is a jobs folder, expanding..." % job["name"]) + csURL = "%s/job/%s" % ( + source["sourceURL"], + urllib.parse.quote(job["name"].replace("/", "%2F")), + ) + try: + child_jobs = jsonapi.get( + "%s/api/json?tree=jobs[name,color]&depth=1" % csURL, auth=creds + ) + csource = dict(source) + csource["sourceURL"] = csURL + if not csource.get("folder"): + csource["folder"] = job["name"] + else: + csource["folder"] += "-" + job["name"] + cjobs, cbuilding = get_all_jobs( + KibbleBit, csource, child_jobs.get("jobs", []), creds + ) + building += cbuilding + for cjob in cjobs: + real_jobs.append(cjob) + except: + KibbleBit.pprint("Couldn't get child jobs, bailing") + print("%s/api/json?tree=jobs[name,color]&depth=1" % csURL) + # Or standard job? + else: + # Is it building? 
+            if "anime" in job.get(
+                "color", ""
+            ):  # a running job will have foo_anime as color
+                building += 1
+            job["fullURL"] = "%s/job/%s" % (
+                source["sourceURL"],
+                urllib.parse.quote(job["name"].replace("/", "%2F")),
+            )
+            job["folder"] = source.get("folder")
+            real_jobs.append(job)
+    return real_jobs, building
diff --git a/kibble/scanners/scanners/jira.py b/kibble/scanners/scanners/jira.py
new file mode 100644
index 00000000..410f8d50
--- /dev/null
+++ b/kibble/scanners/scanners/jira.py
@@ -0,0 +1,463 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import time
+import re
+import hashlib
+import threading
+import requests.exceptions
+
+from kibble.scanners.utils import jsonapi
+
+"""
+This is the Kibble JIRA scanner plugin.
+"""
+
+title = "Scanner for Atlassian JIRA"
+version = "0.1.0"
+
+
+def accepts(source):
+    """ Determines whether we want to handle this source """
+    if source["type"] == "jira":
+        return True
+    if source["type"] == "issuetracker":
+        jira = re.match(r"(https?://.+)/browse/([A-Z0-9]+)", source["sourceURL"])
+        if jira:
+            return True
+    return False
+
+
+def getTime(string):
+    return time.mktime(
+        time.strptime(re.sub(r"\..*", "", str(string)), "%Y-%m-%dT%H:%M:%S")
+    )
+
+
+def assigned(js):
+    if "items" in js:
+        for item in js["items"]:
+            if item["field"] == "assignee":
+                return True
+    return False
+
+
+def wfi(js):
+    if "items" in js:
+        for item in js["items"]:
+            if item["field"] == "status" and item["toString"] == "Waiting for Infra":
+                return True
+    return False
+
+
+def wfu(js):
+    if "items" in js:
+        for item in js["items"]:
+            if item["field"] == "status" and item["toString"] == "Waiting for user":
+                return True
+    return False
+
+
+def moved(js):
+    if "items" in js:
+        for item in js["items"]:
+            if item["field"] == "Key" and item["toString"].find("INFRA-") != -1:
+                return True
+    return False
+
+
+def wasclosed(js):
+    if "changelog" in js:
+        cjs = js["changelog"]["histories"]
+        for citem in cjs:
+            if "items" in citem:
+                for item in citem["items"]:
+                    if item["field"] == "status" and (
+                        item["toString"].lower().find("closed") != -1
+                        or item["toString"].lower().find("resolved") != -1
+                    ):
+                        return (True, citem.get("author", {}))
+    else:
+        if "items" in js:
+            for item in js["items"]:
+                if item["field"] == "status" and (
+                    item["toString"].find("Closed") != -1
+                ):
+                    return (True, None)
+    return (False, None)
+
+
+def resolved(js):
+    if "items" in js:
+        for item in js["items"]:
+            if item["field"] == "resolution" and (
+                item["toString"] != "Pending Closed"
+                and item["toString"] != "Unresolved"
+            ):
+                return True
+    return False
+
+
+def pchange(js):
+    if "items" in js:
+        for item in js["items"]:
+            if item["field"] == "priority":
+                return True
+    return False
+
+
+def scanTicket(KibbleBit, key, u, source, creds, openTickets):
+    """ Scans a single
ticket for activity and people """ + + dhash = hashlib.sha224( + ("%s-%s-%s" % (source["organisation"], source["sourceURL"], key)).encode( + "ascii", errors="replace" + ) + ).hexdigest() + found = True + doc = None + parseIt = False + + # the 'domain' var we try to figure out here is used + # for faking email addresses and keep them unique, + # in case JIRA has email visibility turned off. + domain = "jira" + m = re.search(r"https?://([^/]+)", u) + if m: + domain = m.group(1) + + found = KibbleBit.exists("issue", dhash) + if not found: + KibbleBit.pprint("[%s] We've never seen this ticket before, parsing..." % key) + parseIt = True + else: + ticket = KibbleBit.get("issue", dhash) + if ticket["status"] == "closed" and key in openTickets: + KibbleBit.pprint("[%s] Ticket was reopened, reparsing" % key) + parseIt = True + elif ticket["status"] == "open" and not key in openTickets: + KibbleBit.pprint("[%s] Ticket was recently closed, parsing it" % key) + parseIt = True + else: + if ( + ticket["issueCreator"] == "unknown@kibble" + or ticket["issueCloser"] == "unknown@kibble" + ): # Gotta redo these! + parseIt = True + KibbleBit.pprint( + "[%s] Ticket contains erroneous data from a previous scan, reparsing" + % key + ) + # This is just noise! + # KibbleBit.pprint("[%s] Ticket hasn't changed, ignoring..." % key) + + if parseIt: + KibbleBit.pprint("[%s] Parsing data from JIRA at %s..." % (key, domain)) + queryURL = ( + "%s/rest/api/2/issue/%s?fields=creator,reporter,status,issuetype,summary,assignee,resolutiondate,created,priority,changelog,comment,resolution,votes&expand=changelog" + % (u, key) + ) + jiraURL = "%s/browse/%s" % (u, key) + try: + tjson = jsonapi.get(queryURL, auth=creds) + if not tjson: + KibbleBit.pprint("%s does not exist (404'ed)" % key) + return False + except requests.exceptions.ConnectionError as err: + KibbleBit.pprint("Connection error, skipping this ticket for now!") + return False + st, closer = wasclosed(tjson) + if st and not closer: + KibbleBit.pprint("Closed but no closer??") + closerEmail = None + status = "closed" if st else "open" + + # Make sure we actually have field data to work with + if not tjson.get("fields") or not tjson["fields"].get("created"): + KibbleBit.pprint( + "[%s] JIRA response is missing field data, ignoring ticket." 
% key
+            )
+            return False
+
+        cd = getTime(tjson["fields"]["created"])
+        rd = (
+            getTime(tjson["fields"]["resolutiondate"])
+            if "resolutiondate" in tjson["fields"] and tjson["fields"]["resolutiondate"]
+            else None
+        )
+        comments = 0
+        if "comment" in tjson["fields"] and tjson["fields"]["comment"]:
+            comments = tjson["fields"]["comment"]["total"]
+        assignee = (
+            tjson["fields"]["assignee"].get(
+                "emailAddress",  # Try email, fall back to username
+                tjson["fields"]["assignee"].get("name"),
+            )
+            if tjson["fields"].get("assignee")
+            else None
+        )
+        creator = (
+            tjson["fields"]["reporter"].get(
+                "emailAddress",  # Try email, fall back to username
+                tjson["fields"]["reporter"].get("name"),
+            )
+            if tjson["fields"].get("reporter")
+            else None
+        )
+        title = tjson["fields"]["summary"]
+        if closer:
+            # print("Parsing closer")
+            closerEmail = (
+                closer.get("emailAddress", closer.get("name"))
+                .replace(" dot ", ".", 10)
+                .replace(" at ", "@", 1)
+            )
+            if not "@" in closerEmail:
+                closerEmail = "%s@%s" % (closerEmail, domain)
+            displayName = closer.get("displayName", "Unknown")
+            if displayName and len(displayName) > 0:
+                # Add to people db
+                pid = hashlib.sha1(
+                    ("%s%s" % (source["organisation"], closerEmail)).encode(
+                        "ascii", errors="replace"
+                    )
+                ).hexdigest()
+                jsp = {
+                    "name": displayName,
+                    "email": closerEmail,
+                    "organisation": source["organisation"],
+                    "id": pid,
+                    "upsert": True,
+                }
+                KibbleBit.append("person", jsp)
+
+        if creator:
+            creator = creator.replace(" dot ", ".", 10).replace(" at ", "@", 1)
+            if not "@" in creator:
+                creator = "%s@%s" % (creator, domain)
+            displayName = (
+                tjson["fields"]["reporter"]["displayName"]
+                if tjson["fields"]["reporter"]
+                else None
+            )
+            if displayName and len(displayName) > 0:
+                # Add to people db
+                pid = hashlib.sha1(
+                    ("%s%s" % (source["organisation"], creator)).encode(
+                        "ascii", errors="replace"
+                    )
+                ).hexdigest()
+                jsp = {
+                    "name": displayName,
+                    "email": creator,
+                    "organisation": source["organisation"],
+                    "id": pid,
+                    "upsert": True,
+                }
+                KibbleBit.append("person", jsp)
+        if assignee and not "@" in assignee:
+            assignee = "%s@%s" % (assignee, domain)
+        jso = {
+            "id": dhash,
+            "key": key,
+            "organisation": source["organisation"],
+            "sourceID": source["sourceID"],
+            "url": jiraURL,
+            "status": status,
+            "created": cd,
+            "closed": rd,
+            "issuetype": "issue",
+            "issueCloser": closerEmail,
+            "createdDate": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(cd)),
+            "closedDate": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(rd))
+            if rd
+            else None,
+            "changeDate": time.strftime(
+                "%Y/%m/%d %H:%M:%S", time.gmtime(rd if rd else cd)
+            ),
+            "assignee": assignee,
+            "issueCreator": creator,
+            "comments": comments,
+            "title": title,
+        }
+        KibbleBit.append("issue", jso)
+        return True
+
+
+#
+#    except Exception as err:
+#        KibbleBit.pprint(err)
+#        return False
+
+
+class jiraThread(threading.Thread):
+    def __init__(self, block, KibbleBit, source, creds, pt, ot):
+        super(jiraThread, self).__init__()
+        self.block = block
+        self.KibbleBit = KibbleBit
+        self.creds = creds
+        self.source = source
+        self.pendingTickets = pt
+        self.openTickets = ot
+
+    def run(self):
+        badOnes = 0
+        while len(self.pendingTickets) > 0 and badOnes <= 50:
+            # print("%u elements left to count" % len(pendingTickets))
+            self.block.acquire()
+            try:
+                rl = self.pendingTickets.pop(0)
+            except Exception as err:
+                self.block.release()
+                return
+            if not rl:
+                self.block.release()
+                return
+            self.block.release()
+            if not scanTicket(
+                self.KibbleBit, rl[0], rl[1], rl[2], self.creds,
self.openTickets + ): + self.KibbleBit.pprint("[%s] This borked, trying another one" % rl[0]) + badOnes += 1 + if badOnes > 100: + self.KibbleBit.pprint("Too many errors, bailing!") + self.source["steps"]["issues"] = { + "time": time.time(), + "status": "Too many errors while parsing at " + + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())), + "running": False, + "good": False, + } + self.KibbleBit.updateSource(self.source) + return + else: + badOnes = 0 + + +def scan(KibbleBit, source): + jira = re.match(r"(https?://.+)/browse/([A-Z0-9]+)", source["sourceURL"]) + if jira: + + # JIRA NEEDS credentials to do a proper scan! + creds = None + if ( + source["creds"] + and "username" in source["creds"] + and source["creds"]["username"] + and len(source["creds"]["username"]) > 0 + ): + creds = "%s:%s" % (source["creds"]["username"], source["creds"]["password"]) + if not creds: + KibbleBit.pprint( + "JIRA at %s requires authentication, but none was found! Bailing." + % source["sourceURL"] + ) + source["steps"]["issues"] = { + "time": time.time(), + "status": "Parsing JIRA changes...", + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + return + + source["steps"]["issues"] = { + "time": time.time(), + "status": "Parsing JIRA changes...", + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + + badOnes = 0 + jsa = [] + jsp = [] + pendingTickets = [] + KibbleBit.pprint("Parsing JIRA activity at %s" % source["sourceURL"]) + source["steps"]["issues"] = { + "time": time.time(), + "status": "Downloading changeset", + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + + # Get base URL, list and domain to parse + u = jira.group(1) + instance = jira.group(2) + lastTicket = 0 + latestURL = ( + "%s/rest/api/2/search?jql=project=%s+order+by+createdDate+DESC&fields=id,key&maxResults=1" + % (u, instance) + ) + js = None + + js = jsonapi.get(latestURL, auth=creds) + if "issues" in js and len(js["issues"]) == 1: + key = js["issues"][0]["key"] + m = re.search(r"-(\d+)$", key) + if m: + lastTicket = int(m.group(1)) + + openTickets = [] + startAt = 0 + badTries = 0 + while True and badTries < 10: + openURL = ( + "%s/rest/api/2/search?jql=project=%s+and+status=open+order+by+createdDate+ASC&fields=id,key&maxResults=100&startAt=%u" + % (u, instance, startAt) + ) + # print(openURL) + try: + ojs = jsonapi.get(openURL, auth=creds) + if not "issues" in ojs or len(ojs["issues"]) == 0: + break + for item in ojs["issues"]: + openTickets.append(item["key"]) + KibbleBit.pprint("Found %u open tickets" % len(openTickets)) + startAt += 100 + except: + KibbleBit.pprint("JIRA borked, retrying") + badTries += 1 + KibbleBit.pprint("Found %u open tickets" % len(openTickets)) + + badOnes = 0 + for i in reversed(range(1, lastTicket + 1)): + key = "%s-%u" % (instance, i) + pendingTickets.append([key, u, source]) + + threads = [] + block = threading.Lock() + KibbleBit.pprint("Scanning tickets using 4 sub-threads") + for i in range(0, 4): + t = jiraThread(block, KibbleBit, source, creds, pendingTickets, openTickets) + threads.append(t) + t.start() + + for t in threads: + t.join() + + KibbleBit.pprint("Done scanning %s" % source["sourceURL"]) + + source["steps"]["issues"] = { + "time": time.time(), + "status": "Issue tracker (JIRA) successfully scanned at " + + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())), + "running": False, + "good": True, + } + KibbleBit.updateSource(source) diff --git a/kibble/scanners/scanners/pipermail.py 
b/kibble/scanners/scanners/pipermail.py new file mode 100644 index 00000000..d8b33dd2 --- /dev/null +++ b/kibble/scanners/scanners/pipermail.py @@ -0,0 +1,295 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import mailbox +import email.errors +import email.utils +import email.header +import time +import re +import os +import hashlib +import datetime + +from kibble.scanners.utils import urlmisc + +title = "Scanner for GNU Mailman Pipermail" +version = "0.1.0" + + +def accepts(source): + """ Whether or not we think this is pipermail """ + if source["type"] == "pipermail": + return True + if source["type"] == "mail": + url = source["sourceURL"] + pipermail = re.match(r"(https?://.+/(archives|pipermail)/.+?)/?$", url) + if pipermail: + return True + return False + + +def scan(KibbleBit, source): + url = source["sourceURL"] + pipermail = re.match(r"(https?://.+/(archives|pipermail)/.+?)/?$", url) + if pipermail: + KibbleBit.pprint("Scanning Pipermail source %s" % url) + skipped = 0 + jsa = [] + jsp = [] + source["steps"]["mail"] = { + "time": time.time(), + "status": "Downloading Pipermail statistics", + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + + dt = time.gmtime(time.time()) + firstYear = 1970 + year = dt[0] + month = dt[1] + if month <= 0: + month += 12 + year -= 1 + months = 0 + + knowns = {} + + # While we have older archives, continue to parse + monthNames = [ + "December", + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + ] + while firstYear <= year: + gzurl = "%s/%04u-%s.txt.gz" % (url, year, monthNames[month]) + pd = datetime.date(year, month, 1).timetuple() + dhash = hashlib.sha224( + (("%s %s") % (source["organisation"], gzurl)).encode( + "ascii", errors="replace" + ) + ).hexdigest() + found = False + found = KibbleBit.exists("mailstats", dhash) + if ( + months <= 1 or not found + ): # Always parse this month's stats and the previous month :) + months += 1 + mailFile = urlmisc.unzip(gzurl) + if mailFile: + try: + skipped = 0 + messages = mailbox.mbox(mailFile) + + rawtopics = {} + posters = {} + no_posters = 0 + emails = 0 + senders = {} + for message in messages: + emails += 1 + sender = message["from"] + name = sender + if ( + not "subject" in message + or not message["subject"] + or not "from" in message + or not message["from"] + ): + continue + + irt = message.get("in-reply-to", None) + if not irt and message.get("references"): + irt = message.get("references").split("\n")[0].strip() + replyto = None + if irt and irt in senders: + replyto = senders[irt] + print("This is a reply to %s" % replyto) + raw_subject = re.sub( + r"^[a-zA-Z]+\s*:\s*", "", message["subject"], count=10 + ) + raw_subject = re.sub( + 
r"[\r\n\t]+", "", raw_subject, count=10 + ) + if not raw_subject in rawtopics: + rawtopics[raw_subject] = 0 + rawtopics[raw_subject] += 1 + m = re.match( + r"(.+?) at (.+?) \((.*)\)$", + message["from"], + flags=re.UNICODE, + ) + if m: + name = m.group(3).strip() + sender = m.group(1) + "@" + m.group(2) + else: + m = re.match( + r"(.+)\s*<(.+)>", message["from"], flags=re.UNICODE + ) + if m: + name = m.group(1).replace('"', "").strip() + sender = m.group(2) + if not sender in posters: + posters[sender] = {"name": name, "email": sender} + senders[message.get("message-id", "??")] = sender + mdate = email.utils.parsedate_tz(message["date"]) + mdatestring = time.strftime( + "%Y/%m/%d %H:%M:%S", + time.gmtime(email.utils.mktime_tz(mdate)), + ) + if not sender in knowns: + sid = hashlib.sha1( + ("%s%s" % (source["organisation"], sender)).encode( + "ascii", errors="replace" + ) + ).hexdigest() + knowns[sender] = KibbleBit.exists("person", sid) + if not sender in knowns: + KibbleBit.append( + "person", + { + "name": name, + "email": sender, + "organisation": source["organisation"], + "id": hashlib.sha1( + ( + "%s%s" + % (source["organisation"], sender) + ).encode("ascii", errors="replace") + ).hexdigest(), + }, + ) + knowns[sender] = True + jse = { + "organisation": source["organisation"], + "sourceURL": source["sourceURL"], + "sourceID": source["sourceID"], + "date": mdatestring, + "sender": sender, + "replyto": replyto, + "subject": message["subject"], + "address": sender, + "ts": email.utils.mktime_tz(mdate), + "id": message["message-id"], + } + KibbleBit.append("email", jse) + + for sender in posters: + no_posters += 1 + i = 0 + topics = 0 + for key in rawtopics: + topics += 1 + for key in reversed(sorted(rawtopics, key=lambda x: x)): + val = rawtopics[key] + i += 1 + if i > 10: + break + KibbleBit.pprint( + "Found top 10: %s (%s emails)" % (key, val) + ) + shash = hashlib.sha224( + key.encode("ascii", errors="replace") + ).hexdigest() + md = time.strftime("%Y/%m/%d %H:%M:%S", pd) + mlhash = hashlib.sha224( + ( + ("%s%s%s%s") + % ( + key, + source["sourceURL"], + source["organisation"], + md, + ) + ).encode("ascii", errors="replace") + ).hexdigest() # one unique id per month per mail thread + jst = { + "organisation": source["organisation"], + "sourceURL": source["sourceURL"], + "sourceID": source["sourceID"], + "date": md, + "emails": val, + "shash": shash, + "subject": key, + "ts": time.mktime(pd), + "id": mlhash, + } + KibbleBit.index("mailtop", mlhash, jst) + + jso = { + "organisation": source["organisation"], + "sourceURL": source["sourceURL"], + "sourceID": source["sourceID"], + "date": time.strftime("%Y/%m/%d %H:%M:%S", pd), + "authors": no_posters, + "emails": emails, + "topics": topics, + } + KibbleBit.index("mailstats", dhash, jso) + + os.unlink(mailFile) + except Exception as err: + KibbleBit.pprint( + "Couldn't parse %s, skipping: %s" % (gzurl, err) + ) + skipped += 1 + if skipped > 12: + KibbleBit.pprint( + "12 skips in a row, breaking off (no more data?)" + ) + break + else: + KibbleBit.pprint("Couldn't find %s, skipping." 
% gzurl) + skipped += 1 + if skipped > 12: + KibbleBit.pprint( + "12 skips in a row, breaking off (no more data?)" + ) + break + month -= 1 + if month <= 0: + month += 12 + year -= 1 + + source["steps"]["mail"] = { + "time": time.time(), + "status": "Mail archives successfully scanned at " + + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())), + "running": False, + "good": True, + } + KibbleBit.updateSource(source) + else: + KibbleBit.pprint("Invalid Pipermail URL detected: %s" % url, True) + source["steps"]["mail"] = { + "time": time.time(), + "status": "Invalid or malformed URL detected!", + "running": False, + "good": False, + } + KibbleBit.updateSource(source) diff --git a/kibble/scanners/scanners/ponymail-kpe.py b/kibble/scanners/scanners/ponymail-kpe.py new file mode 100644 index 00000000..b5b40072 --- /dev/null +++ b/kibble/scanners/scanners/ponymail-kpe.py @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import time +import re + +from kibble.scanners.utils import jsonapi, kpe + +""" +This is a Kibble scanner plugin for Apache Pony Mail sources. +""" + +title = "Key Phrase Extraction plugin for Apache Pony Mail" +version = "0.1.0" +ROBITS = r"(git|gerrit|jenkins|hudson|builds|bugzilla)@" +MAX_COUNT = ( + 100 +) # Max number of unparsed emails to handle (so we don't max out API credits!) 
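+# Note: ROBITS is applied with re.search(), i.e. anywhere in the sender
+# address, so automated senders such as "jenkins@builds.apache.org" are
+# skipped; an address merely containing "git@" would be filtered too.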
+ + +def accepts(source): + """ Test if source matches a Pony Mail archive """ + # If the source equals the plugin name, assume a yes + if source["type"] == "ponymail": + return True + + # If it's of type 'mail', check the URL + if source["type"] == "mail": + if re.match(r"(https?://.+)/list\.html\?(.+)@(.+)", source["sourceURL"]): + return True + + # Default to not recognizing the source + return False + + +def scan(KibbleBit, source): + # Validate URL first + url = re.match(r"(https?://.+)/list\.html\?(.+)@(.+)", source["sourceURL"]) + if not url: + KibbleBit.pprint( + "Malformed or invalid Pony Mail URL passed to scanner: %s" + % source["sourceURL"] + ) + source["steps"]["mail"] = { + "time": time.time(), + "status": "Could not parse Pony Mail URL!", + "running": False, + "good": False, + } + KibbleBit.updateSource(source) + return + + if not "azure" in KibbleBit.config and not "picoapi" in KibbleBit.config: + KibbleBit.pprint( + "No Azure/picoAPI creds configured, skipping key phrase extraction" + ) + return + + cookie = None + if "creds" in source and source["creds"]: + cookie = source["creds"].get("cookie", None) + + rootURL = re.sub(r"list.html.+", "", source["sourceURL"]) + query = { + "query": {"bool": {"must": [{"term": {"sourceID": source["sourceID"]}}]}}, + "sort": [{"ts": "desc"}], + } + + # Get an initial count of commits + res = KibbleBit.broker.DB.search( + index=KibbleBit.dbname, doc_type="email", body=query, size=MAX_COUNT * 4 + ) + ec = 0 + hits = [] + for hit in res["hits"]["hits"]: + eml = hit["_source"] + if not re.search(ROBITS, eml["sender"]): + ec += 1 + if ec > MAX_COUNT: + break + if "kpe" not in eml: + emlurl = "%s/api/email.lua?id=%s" % (rootURL, eml["id"]) + KibbleBit.pprint("Fetching %s" % emlurl) + rv = None + try: + rv = jsonapi.get(emlurl, cookie=cookie) + if rv and "body" in rv: + hits.append([hit["_id"], rv["body"], eml]) + except Exception as err: + KibbleBit.pprint("Server error, skipping this email") + + bodies = [] + for hit in hits: + body = hit[1] + bid = hit[0] + bodies.append(body) + if bodies: + if "watson" in KibbleBit.config: + pass # Haven't written this yet + elif "azure" in KibbleBit.config: + KPEs = kpe.azureKPE(KibbleBit, bodies) + elif "picoapi" in KibbleBit.config: + KPEs = kpe.picoKPE(KibbleBit, bodies) + if KPEs == False: + KibbleBit.pprint("Hit rate limit, not trying further emails for now.") + + a = 0 + for hit in hits: + kpe_ = KPEs[a] + bid = hit[0] + eml = hit[2] + a += 1 + if not kpe_: + kpe_ = ["_NULL_"] + eml["kpe"] = kpe_ + print("Key phrases for %s: %s" % (bid, ", ".join(kpe_))) + KibbleBit.index("email", bid, eml) + else: + KibbleBit.pprint("No emails to analyze") + KibbleBit.pprint("Done with key phrase extraction") diff --git a/kibble/scanners/scanners/ponymail-tone.py b/kibble/scanners/scanners/ponymail-tone.py new file mode 100644 index 00000000..c616a475 --- /dev/null +++ b/kibble/scanners/scanners/ponymail-tone.py @@ -0,0 +1,137 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +This is a Kibble scanner plugin for Apache Pony Mail sources. +""" +import time +import re + +from kibble.scanners.utils import jsonapi, tone + +title = "Tone/Mood Scanner plugin for Apache Pony Mail" +version = "0.1.0" +ROBITS = r"(git|gerrit|jenkins|hudson|builds|bugzilla)@" +MAX_COUNT = 250 + + +def accepts(source): + """ Test if source matches a Pony Mail archive """ + # If the source equals the plugin name, assume a yes + if source["type"] == "ponymail": + return True + + # If it's of type 'mail', check the URL + if source["type"] == "mail": + if re.match(r"(https?://.+)/list\.html\?(.+)@(.+)", source["sourceURL"]): + return True + + # Default to not recognizing the source + return False + + +def scan(KibbleBit, source): + # Validate URL first + url = re.match(r"(https?://.+)/list\.html\?(.+)@(.+)", source["sourceURL"]) + if not url: + KibbleBit.pprint( + "Malformed or invalid Pony Mail URL passed to scanner: %s" + % source["sourceURL"] + ) + source["steps"]["mail"] = { + "time": time.time(), + "status": "Could not parse Pony Mail URL!", + "running": False, + "good": False, + } + KibbleBit.updateSource(source) + return + + if ( + not "watson" in KibbleBit.config + and not "azure" in KibbleBit.config + and not "picoapi" in KibbleBit.config + ): + KibbleBit.pprint( + "No Watson/Azure/picoAPI creds configured, skipping tone analyzer" + ) + return + + cookie = None + if "creds" in source and source["creds"]: + cookie = source["creds"].get("cookie", None) + + rootURL = re.sub(r"list.html.+", "", source["sourceURL"]) + query = { + "query": {"bool": {"must": [{"term": {"sourceID": source["sourceID"]}}]}}, + "sort": [{"ts": "desc"}], + } + + # Get an initial count of commits + res = KibbleBit.broker.DB.search( + index=KibbleBit.dbname, doc_type="email", body=query, size=MAX_COUNT * 4 + ) + ec = 0 + hits = [] + for hit in res["hits"]["hits"]: + eml = hit["_source"] + if not re.search(ROBITS, eml["sender"]): + ec += 1 + if ec > MAX_COUNT: + break + if "mood" not in eml: + emlurl = "%s/api/email.lua?id=%s" % (rootURL, eml["id"]) + KibbleBit.pprint("Fetching %s" % emlurl) + rv = None + try: + rv = jsonapi.get(emlurl, cookie=cookie) + if rv and "body" in rv: + hits.append([hit["_id"], rv["body"], eml]) + except Exception as err: + KibbleBit.pprint("Server error, skipping this email") + + bodies = [] + for hit in hits: + body = hit[1] + bid = hit[0] + bodies.append(body) + if bodies: + if "watson" in KibbleBit.config: + moods = tone.watsonTone(KibbleBit, bodies) + elif "azure" in KibbleBit.config: + moods = tone.azureTone(KibbleBit, bodies) + elif "picoapi" in KibbleBit.config: + moods = tone.picoTone(KibbleBit, bodies) + if moods == False: + KibbleBit.pprint("Hit rate limit, not trying further emails for now.") + + a = 0 + for hit in hits: + mood = moods[a] + bid = hit[0] + eml = hit[2] + a += 1 + eml["mood"] = mood + hm = [0, "unknown"] + for m, s in mood.items(): + if s > hm[0]: + hm = [s, m] + print("Likeliest overall mood for %s: %s" % (bid, hm[1])) + KibbleBit.index("email", bid, eml) + else: + KibbleBit.pprint("No emails to analyze") + KibbleBit.pprint("Done with tone 
analysis") diff --git a/kibble/scanners/scanners/ponymail.py b/kibble/scanners/scanners/ponymail.py new file mode 100644 index 00000000..89e15723 --- /dev/null +++ b/kibble/scanners/scanners/ponymail.py @@ -0,0 +1,309 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import time +import re +import hashlib +import datetime + +from kibble.scanners.utils import jsonapi + +""" +This is a Kibble scanner plugin for Apache Pony Mail sources. +""" + +title = "Scanner plugin for Apache Pony Mail" +version = "0.1.0" + + +def accepts(source): + """ Test if source matches a Pony Mail archive """ + # If the source equals the plugin name, assume a yes + if source["type"] == "ponymail": + return True + + # If it's of type 'mail', check the URL + if source["type"] == "mail": + if re.match(r"(https?://.+)/list\.html\?(.+)@(.+)", source["sourceURL"]): + return True + + # Default to not recognizing the source + return False + + +def countSubs(struct, kids=0): + """ Counts replies in a thread """ + if "children" in struct and len(struct["children"]) > 0: + for child in struct["children"]: + kids += 1 + kids += countSubs(child) + return kids + + +def repliedTo(emails, struct): + myList = {} + for eml in struct: + myID = eml["tid"] + if "children" in eml: + for child in eml["children"]: + myList[child["tid"]] = myID + if len(child["children"]) > 0: + cList = repliedTo(emails, child["children"]) + myList.update(cList) + return myList + + +def getSender(email): + sender = email["from"] + name = sender + m = re.match(r"(.+)\s*<(.+)>", email["from"], flags=re.UNICODE) + if m: + name = m.group(1).replace('"', "").strip() + sender = m.group(2) + return sender + + +def scan(KibbleBit, source): + # Validate URL first + url = re.match(r"(https?://.+)/list\.html\?(.+)@(.+)", source["sourceURL"]) + if not url: + KibbleBit.pprint( + "Malformed or invalid Pony Mail URL passed to scanner: %s" + % source["sourceURL"] + ) + source["steps"]["mail"] = { + "time": time.time(), + "status": "Could not parse Pony Mail URL!", + "running": False, + "good": False, + } + KibbleBit.updateSource(source) + return + + # Pony Mail requires a UI cookie in order to work. Maked sure we have one! + cookie = None + if "creds" in source and source["creds"]: + cookie = source["creds"].get("cookie", None) + if not cookie: + KibbleBit.pprint( + "Pony Mail instance at %s requires an authorized cookie, none found! Bailing." 
+ % source["sourceURL"] + ) + source["steps"]["mail"] = { + "time": time.time(), + "status": "No authorized cookie found in source object.", + "running": False, + "good": False, + } + KibbleBit.updateSource(source) + return + + # Notify scanner and DB that this is valid and we've begun parsing + KibbleBit.pprint("%s is a valid Pony Mail address, parsing" % source["sourceURL"]) + source["steps"]["mail"] = { + "time": time.time(), + "status": "Downloading Pony Mail statistics", + "running": True, + "good": True, + } + KibbleBit.updateSource(source) + + # Get base URL, list and domain to parse + u = url.group(1) + l = url.group(2) + d = url.group(3) + + # Get this month + dt = time.gmtime(time.time()) + firstYear = 1970 + year = dt[0] + month = dt[1] + if month <= 0: + month += 12 + year -= 1 + months = 0 + + # Hash for keeping records of who we know + knowns = {} + + # While we have older archives, continue to parse + while firstYear <= year: + statsurl = "%s/api/stats.lua?list=%s&domain=%s&d=%s" % ( + u, + l, + d, + "%04u-%02u" % (year, month), + ) + dhash = hashlib.sha224( + (("%s %s") % (source["organisation"], statsurl)).encode( + "ascii", errors="replace" + ) + ).hexdigest() + found = False + if KibbleBit.exists("mailstats", dhash): + found = True + if months <= 1 or not found: # Always parse this month's stats :) + months += 1 + KibbleBit.pprint("Parsing %04u-%02u" % (year, month)) + KibbleBit.pprint(statsurl) + pd = datetime.date(year, month, 1).timetuple() + try: + js = jsonapi.get(statsurl, cookie=cookie) + except Exception as err: + KibbleBit.pprint("Server error, skipping this month") + month -= 1 + if month <= 0: + month += 12 + year -= 1 + continue + if "firstYear" in js: + firstYear = js["firstYear"] + # print("First Year is %u" % firstYear) + else: + KibbleBit.pprint("JSON was missing fields, aborting!") + break + replyList = repliedTo(js["emails"], js["thread_struct"]) + topics = js["no_threads"] + posters = {} + no_posters = 0 + emails = len(js["emails"]) + top10 = [] + for eml in js["thread_struct"]: + count = countSubs(eml, 0) + subject = "" + for reml in js["emails"]: + if reml["id"] == eml["tid"]: + subject = reml["subject"] + break + if len(subject) > 0 and count > 0: + subject = re.sub( + r"^((re|fwd|aw|fw):\s*)+", "", subject, flags=re.IGNORECASE + ) + subject = re.sub(r"[\r\n\t]+", "", subject, count=20) + emlid = hashlib.sha1( + subject.encode("ascii", errors="replace") + ).hexdigest() + top10.append([emlid, subject, count]) + i = 0 + for top in reversed(sorted(top10, key=lambda x: x[2])): + i += 1 + if i > 10: + break + KibbleBit.pprint("Found top 10: %s (%s emails)" % (top[1], top[2])) + md = time.strftime("%Y/%m/%d %H:%M:%S", pd) + mlhash = hashlib.sha224( + ( + ("%s%s%s%s") + % (top[0], source["sourceURL"], source["organisation"], md) + ).encode("ascii", errors="replace") + ).hexdigest() # one unique id per month per mail thread + jst = { + "organisation": source["organisation"], + "sourceURL": source["sourceURL"], + "sourceID": source["sourceID"], + "date": md, + "emails": top[2], + "shash": top[0], + "subject": top[1], + "ts": time.mktime(pd), + "id": mlhash, + } + KibbleBit.index("mailtop", mlhash, jst) + + for email in js["emails"]: + sender = email["from"] + name = sender + m = re.match(r"(.+)\s*<(.+)>", email["from"], flags=re.UNICODE) + if m: + name = m.group(1).replace('"', "").strip() + sender = m.group(2) + if not sender in posters: + posters[sender] = {"name": name, "email": sender} + if not sender in knowns: + sid = hashlib.sha1( + ("%s%s" % 
(source["organisation"], sender)).encode( + "ascii", errors="replace" + ) + ).hexdigest() + if KibbleBit.exists("person", sid): + knowns[sender] = True + if not sender in knowns or name != sender: + KibbleBit.append( + "person", + { + "upsert": True, + "name": name, + "email": sender, + "organisation": source["organisation"], + "id": hashlib.sha1( + ("%s%s" % (source["organisation"], sender)).encode( + "ascii", errors="replace" + ) + ).hexdigest(), + }, + ) + knowns[sender] = True + replyTo = None + if email["id"] in replyList: + rt = replyList[email["id"]] + for eml in js["emails"]: + if eml["id"] == rt: + replyTo = getSender(eml) + print("Email was reply to %s" % sender) + jse = { + "organisation": source["organisation"], + "sourceURL": source["sourceURL"], + "sourceID": source["sourceID"], + "date": time.strftime( + "%Y/%m/%d %H:%M:%S", time.gmtime(email["epoch"]) + ), + "sender": sender, + "address": sender, + "subject": email["subject"], + "replyto": replyTo, + "ts": email["epoch"], + "id": email["id"], + "upsert": True, + } + KibbleBit.append("email", jse) + for sender in posters: + no_posters += 1 + + jso = { + "organisation": source["organisation"], + "sourceURL": source["sourceURL"], + "sourceID": source["sourceID"], + "date": time.strftime("%Y/%m/%d %H:%M:%S", pd), + "authors": no_posters, + "emails": emails, + "topics": topics, + } + # print("Indexing as %s" % dhash) + KibbleBit.index("mailstats", dhash, jso) + month -= 1 + if month <= 0: + month += 12 + year -= 1 + + source["steps"]["mail"] = { + "time": time.time(), + "status": "Mail archives successfully scanned at " + + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())), + "running": False, + "good": True, + } + KibbleBit.updateSource(source) diff --git a/kibble/scanners/scanners/travis.py b/kibble/scanners/scanners/travis.py new file mode 100644 index 00000000..b8c92820 --- /dev/null +++ b/kibble/scanners/scanners/travis.py @@ -0,0 +1,393 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import time +import datetime +import re +import hashlib +import threading +import requests +import requests.exceptions + +""" +This is the Kibble Travis CI scanner plugin. 
+""" + +title = "Scanner for Travis CI" +version = "0.1.0" + + +def accepts(source): + """ Determines whether we want to handle this source """ + if source["type"] == "travis": + return True + return False + + +def scanJob(KibbleBit, source, bid, token, TLD): + """ Scans a single job for activity """ + NOW = int(datetime.datetime.utcnow().timestamp()) + dhash = hashlib.sha224( + ("%s-%s-%s" % (source["organisation"], source["sourceURL"], bid)).encode( + "ascii", errors="replace" + ) + ).hexdigest() + found = True + doc = None + parseIt = False + found = KibbleBit.exists("cijob", dhash) + + # Get the job data + pages = 0 + offset = 0 + last_page = False + oURL = "https://api.travis-ci.%s/repo/%s/builds" % (TLD, bid) + + # For as long as pagination makes sense... + while last_page == False: + bURL = "https://api.travis-ci.%s/repo/%s/builds?limit=100&offset=%u" % ( + TLD, + bid, + offset, + ) + KibbleBit.pprint("Scanning %s" % bURL) + rv = requests.get( + bURL, + headers={"Travis-API-Version": "3", "Authorization": "token %s" % token}, + ) + if rv.status_code == 200: + repojs = rv.json() + # If travis tells us it's the last page, trust it. + if repojs["@pagination"]["is_last"]: + KibbleBit.pprint( + "Assuming this is the last page we need (travis says so)" + ) + last_page = True + + KibbleBit.pprint( + "%s has %u builds done" % (bURL, repojs["@pagination"]["count"]) + ) + + # BREAKER: If we go past count somehow, and travis doesn't say so, bork anyway + if repojs["@pagination"]["count"] < offset: + return True + + offset += 100 + for build in repojs.get("builds", []): + buildID = build["id"] + buildProject = build["repository"]["slug"] + startedAt = build["started_at"] + finishedAt = build["finished_at"] + duration = build["duration"] + completed = True if duration else False + duration = duration or 0 + + buildhash = hashlib.sha224( + ( + "%s-%s-%s-%s" + % (source["organisation"], source["sourceURL"], bid, buildID) + ).encode("ascii", errors="replace") + ).hexdigest() + builddoc = None + try: + builddoc = KibbleBit.get("ci_build", buildhash) + except: + pass + + # If this build already completed, no need to parse it again + if builddoc and builddoc.get("completed", False): + # If we're on page > 1 and we've seen a completed build, assume + # that we don't need the older ones + if pages > 1: + KibbleBit.pprint( + "Assuming this is the last page we need (found completed build on page > 1)" + ) + last_page = True + break + continue + + # Get build status (success, failed, canceled etc) + status = "building" + if build["state"] in ["finished", "passed"]: + status = "success" + if build["state"] in ["failed", "errored"]: + status = "failed" + if build["state"] in ["aborted", "canceled"]: + status = "aborted" + + FIN = 0 + STA = 0 + if finishedAt: + FIN = datetime.datetime.strptime( + finishedAt, "%Y-%m-%dT%H:%M:%SZ" + ).timestamp() + if startedAt: + STA = int( + datetime.datetime.strptime( + startedAt, "%Y-%m-%dT%H:%M:%SZ" + ).timestamp() + ) + + # We don't know how to calc queues yet, set to 0 + queuetime = 0 + + doc = { + # Build specific data + "id": buildhash, + "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(FIN)), + "buildID": buildID, + "completed": completed, + "duration": duration * 1000, + "job": buildProject, + "jobURL": oURL, + "status": status, + "started": STA, + "ci": "travis", + "queuetime": queuetime, + # Standard docs values + "sourceID": source["sourceID"], + "organisation": source["organisation"], + "upsert": True, + } + KibbleBit.append("ci_build", doc) + pages += 1 + 
+        else:
+            # We hit a snag, abort!
+            KibbleBit.pprint("Travis returned a non-200 response, aborting.")
+            return False
+
+    return True
+
+
+class travisThread(threading.Thread):
+    """ Generic thread class for scheduling multiple scans at once """
+
+    def __init__(self, block, KibbleBit, source, token, jobs, TLD):
+        super(travisThread, self).__init__()
+        self.block = block
+        self.KibbleBit = KibbleBit
+        self.token = token
+        self.source = source
+        self.jobs = jobs
+        self.tld = TLD
+
+    def run(self):
+        badOnes = 0
+        while len(self.jobs) > 0 and badOnes <= 50:
+            self.block.acquire()
+            try:
+                job = self.jobs.pop(0)
+            except IndexError:
+                self.block.release()
+                return
+            if not job:
+                self.block.release()
+                return
+            self.block.release()
+            if not scanJob(self.KibbleBit, self.source, job, self.token, self.tld):
+                self.KibbleBit.pprint("[%s] This borked, trying another one" % job)
+                badOnes += 1
+                if badOnes > 50:
+                    self.KibbleBit.pprint("Too many errors, bailing!")
+                    self.source["steps"]["travis"] = {
+                        "time": time.time(),
+                        "status": "Too many errors while parsing at "
+                        + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())),
+                        "running": False,
+                        "good": False,
+                    }
+                    self.KibbleBit.updateSource(self.source)
+                    return
+            else:
+                badOnes = 0
+
+
+def scan(KibbleBit, source):
+    # Simple URL check
+    travis = re.match(r"https?://travis-ci\.(org|com)", source["sourceURL"])
+    if travis:
+        # Is this travis-ci.org or travis-ci.com - we need to know!
+        TLD = travis.group(1)
+        source["steps"]["travis"] = {
+            "time": time.time(),
+            "status": "Parsing Travis job changes...",
+            "running": True,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
+
+        pendingJobs = []
+        KibbleBit.pprint("Parsing Travis activity at %s" % source["sourceURL"])
+        source["steps"]["travis"] = {
+            "time": time.time(),
+            "status": "Downloading changeset",
+            "running": True,
+            "good": True,
+        }
+        KibbleBit.updateSource(source)
+
+        # Travis needs a token
+        token = None
+        if (
+            source["creds"]
+            and "token" in source["creds"]
+            and source["creds"]["token"]
+            and len(source["creds"]["token"]) > 0
+        ):
+            token = source["creds"]["token"]
+        else:
+            KibbleBit.pprint("Travis CI requires a token to work!")
+            return False
+
+        # Get the job list, paginated
+        # Used for pagination
+        jobs = 100
+        offset = 0
+
+        # Counters; builds queued, running and total jobs
+        queued = 0  # We don't know how to count this yet
+        building = 0
+        total = 0
+        blocked = 0  # Dunno how to count yet
+        stuck = 0  # Ditto
+        avgqueuetime = 0  # Ditto, fake it
+
+        maybeQueued = []
+        while jobs == 100:
+            URL = (
+                "https://api.travis-ci.%s/repos?repository.active=true&sort_by=current_build:desc&offset=%u&limit=100&include=repository.last_started_build"
+                % (TLD, offset)
+            )
+            offset += 100
+            r = requests.get(
+                URL,
+                headers={
+                    "Travis-API-Version": "3",
+                    "Authorization": "token %s" % token,
+                },
+            )
+
+            if r.status_code != 200:
+                KibbleBit.pprint("Travis did not return a 200 Okay, bad token?!")
+
+                source["steps"]["travis"] = {
+                    "time": time.time(),
+                    "status": "Travis CI scan failed at "
+                    + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time()))
+                    + ". Bad token?!",
+                    "running": False,
+                    "good": False,
+                }
+                KibbleBit.updateSource(source)
+                return
+
+            # For each build job
+            js = r.json()
+            for repo in js["repositories"]:
+                total += 1
+                cb = repo.get("last_started_build")
+                if cb:
+                    # Is the build currently running?
+ if cb["state"] in ["started", "created", "queued", "pending"]: + for job in cb.get("jobs", []): + maybeQueued.append(job["id"]) + + # Queue up build jobs for the threaded scanner + bid = repo["id"] + pendingJobs.append(bid) + + jobs = len(js["repositories"]) + KibbleBit.pprint("Scanned %u jobs..." % total) + + # Find out how many building and pending jobs + for jobID in maybeQueued: + URL = "https://api.travis-ci.%s/job/%u" % (TLD, jobID) + r = requests.get( + URL, + headers={ + "Travis-API-Version": "3", + "Authorization": "token %s" % token, + }, + ) + if r.status_code == 200: + jobjs = r.json() + if jobjs["state"] == "started": + building += 1 + KibbleBit.pprint("Job %u is building" % jobID) + elif jobjs["state"] in ["created", "queued", "pending"]: + queued += 1 + blocked += ( + 1 + ) # Queued in Travis generally means a job can't find an executor, and thus is blocked. + KibbleBit.pprint("Job %u is pending" % jobID) + KibbleBit.pprint("%u building, %u queued..." % (building, queued)) + + # Save queue snapshot + NOW = int(datetime.datetime.utcnow().timestamp()) + queuehash = hashlib.sha224( + ( + "%s-%s-queue-%s" + % (source["organisation"], source["sourceURL"], int(time.time())) + ).encode("ascii", errors="replace") + ).hexdigest() + + # Write up a queue doc + queuedoc = { + "id": queuehash, + "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(NOW)), + "time": NOW, + "building": building, + "size": queued, + "blocked": blocked, + "stuck": stuck, + "avgwait": avgqueuetime, + "ci": "travis", + # Standard docs values + "sourceID": source["sourceID"], + "organisation": source["organisation"], + "upsert": True, + } + KibbleBit.append("ci_queue", queuedoc) + + KibbleBit.pprint("Found %u jobs in Travis" % len(pendingJobs)) + + threads = [] + block = threading.Lock() + KibbleBit.pprint("Scanning jobs using 4 sub-threads") + for i in range(0, 4): + t = travisThread(block, KibbleBit, source, token, pendingJobs, TLD) + threads.append(t) + t.start() + + for t in threads: + t.join() + + # We're all done, yaay + KibbleBit.pprint("Done scanning %s" % source["sourceURL"]) + + source["steps"]["travis"] = { + "time": time.time(), + "status": "Travis successfully scanned at " + + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())), + "running": False, + "good": True, + } + KibbleBit.updateSource(source) diff --git a/kibble/scanners/scanners/twitter.py b/kibble/scanners/scanners/twitter.py new file mode 100644 index 00000000..b274fe79 --- /dev/null +++ b/kibble/scanners/scanners/twitter.py @@ -0,0 +1,149 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +This is a Kibble scanner plugin for Twitter sources. 
+""" +import time +import hashlib +import twitter + +title = "Scanner plugin for Twitter" +version = "0.1.0" + + +def accepts(source): + """ Test if source matches a Twitter handle """ + # If the source equals the plugin name, assume a yes + if source["type"] == "twitter": + return True + + # Default to not recognizing the source + return False + + +def getFollowers(KibbleBit, source, t): + """ Get followers of a handle, store them for mapping and trend purposes""" + # Get our twitter handle + handle = source["sourceURL"] + + # Get number of followers + tuser = t.GetUser(screen_name=handle) + no_followers = tuser.followers_count + d = time.strftime("%Y/%m/%d 0:00:00", time.gmtime()) # Today at midnight + dhash = hashlib.sha224( + ( + ("twitter:%s:%s:%s") % (source["organisation"], source["sourceURL"], d) + ).encode("ascii", errors="replace") + ).hexdigest() + jst = { + "organisation": source["organisation"], + "sourceURL": source["sourceURL"], + "sourceID": source["sourceID"], + "id": dhash, + "followers": no_followers, + "date": d, + } + KibbleBit.pprint("%s has %u followers currently." % (handle, no_followers)) + KibbleBit.index("twitter_followers", dhash, jst) + + # Collect list of current followers + followers = t.GetFollowers(screen_name=handle) + + # For each follower, if they're not mapped yet, add them + # This has a limitation of 100 new added per run, but meh... + KibbleBit.pprint("Looking up followers of %s" % handle) + for follower in followers: + # id, name, screen_name are useful here + KibbleBit.pprint("Found %s as follower" % follower.screen_name) + + # Store twitter follower profile if not already logged + dhash = hashlib.sha224( + ( + ("twitter:%s:%s:%s") % (source["organisation"], handle, follower.id) + ).encode("ascii", errors="replace") + ).hexdigest() + if not KibbleBit.exists("twitter_follow", dhash): + jst = { + "organisation": source["organisation"], + "sourceURL": source["sourceURL"], + "sourceID": source["sourceID"], + "twitterid": follower.id, + "name": follower.name, + "screenname": follower.screen_name, + "id": dhash, + "date": time.strftime( + "%Y/%m/%d %H:%M:%S", time.gmtime() + ), # First time we spotted them following. + } + KibbleBit.pprint( + "%s is new, recording date and details." 
+                % follower.screen_name
+            )
+            KibbleBit.index("twitter_follow", dhash, jst)
+
+
+def scan(KibbleBit, source):
+    source["steps"]["twitter"] = {
+        "time": time.time(),
+        "status": "Scanning Twitter activity and status",
+        "running": True,
+        "good": True,
+    }
+    KibbleBit.updateSource(source)
+    t = None
+    if "creds" in source and source["creds"]:
+        t = twitter.Api(
+            access_token_key=source["creds"].get("token", None),
+            access_token_secret=source["creds"].get("token_secret", None),
+            consumer_key=source["creds"].get("consumer_key", None),
+            consumer_secret=source["creds"].get("consumer_secret", None),
+        )
+    KibbleBit.pprint("Verifying twitter credentials...")
+    try:
+        t.VerifyCredentials()
+    except:
+        source["steps"]["twitter"] = {
+            "time": time.time(),
+            "status": "Could not verify twitter credentials",
+            "running": False,
+            "good": False,
+        }
+        KibbleBit.updateSource(source)
+        KibbleBit.pprint("Could not verify twitter creds, aborting!")
+        return
+    # Start by getting and saving followers
+    try:
+        getFollowers(KibbleBit, source, t)
+    except Exception as err:
+        source["steps"]["twitter"] = {
+            "time": time.time(),
+            "status": "Could not scan Twitter: %s" % err,
+            "running": False,
+            "good": False,
+        }
+        KibbleBit.updateSource(source)
+        KibbleBit.pprint("Twitter scan failed: %s" % err)
+        # Bail here, so the success status below doesn't overwrite the
+        # failure we just recorded.
+        return
+
+    # All done, report that!
+    source["steps"]["twitter"] = {
+        "time": time.time(),
+        "status": "Twitter successfully scanned at "
+        + time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())),
+        "running": False,
+        "good": True,
+    }
+    KibbleBit.updateSource(source)
diff --git a/kibble/scanners/utils/__init__.py b/kibble/scanners/utils/__init__.py
new file mode 100644
index 00000000..13a83393
--- /dev/null
+++ b/kibble/scanners/utils/__init__.py
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
diff --git a/kibble/scanners/utils/git.py b/kibble/scanners/utils/git.py
new file mode 100644
index 00000000..c7809193
--- /dev/null
+++ b/kibble/scanners/utils/git.py
@@ -0,0 +1,93 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+""" This is the Kibble git utility plugin """
+
+import os
+import sys
+import subprocess
+import re
+
+
+def defaultBranch(source, datapath, KibbleBit=None):
+    """ Tries to figure out what the main branch of a repo is """
+    wanted_branches = ["master", "trunk"]
+    branch = ""
+    # If we have an override of branches we like, use 'em
+    if KibbleBit and KibbleBit.config.get("git"):
+        wanted_branches = KibbleBit.config["git"].get(
+            "wanted_branches", wanted_branches
+        )
+
+    # For each wanted branch, in order, look for it in our clone,
+    # and return the name if found.
+    for B in wanted_branches:
+        try:
+            branch = (
+                subprocess.check_output(
+                    "cd %s && git rev-parse --abbrev-ref %s" % (datapath, B),
+                    shell=True,
+                    stderr=subprocess.DEVNULL,
+                )
+                .decode("ascii", "replace")
+                .strip()
+                .strip("* ")
+            )
+            return branch
+        except:
+            pass
+    # If we couldn't find it locally, look at all branches (local+remote)
+    try:
+        inp = (
+            subprocess.check_output(
+                r"cd %s && git branch -a | awk -F ' +' '! /\(no branch\)/ {print $2}'"
+                % datapath,
+                shell=True,
+                stderr=subprocess.DEVNULL,
+            )
+            .decode("ascii", "replace")
+            .split()
+        )
+        if len(inp) > 0:
+            for b in sorted(inp):
+                if b.find("detached") == -1:
+                    branch = str(b.replace("remotes/origin/", "", 1))
+                    for B in wanted_branches:
+                        if branch == B:
+                            return branch
+    except:
+        pass
+
+    # If still not found, resort to whatever branch comes first in the remote listing...
+    inp = (
+        subprocess.check_output(
+            "cd %s && git ls-remote --heads %s" % (datapath, source["sourceURL"]),
+            shell=True,
+            stderr=subprocess.DEVNULL,
+        )
+        .decode("ascii", "replace")
+        .split()
+    )
+    if len(inp) > 0:
+        for remote in inp:
+            m = re.match(r"[a-f0-9]+\s+refs/heads/(?:remotes/)?(.+)", remote)
+            if m:
+                branch = m.group(1)
+                return branch
+    # Give up
+    return ""
diff --git a/kibble/scanners/utils/github.py b/kibble/scanners/utils/github.py
new file mode 100644
index 00000000..95b30ea0
--- /dev/null
+++ b/kibble/scanners/utils/github.py
@@ -0,0 +1,97 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+ +""" GitHub utility library """ +import re +import requests +import time + +repo_pattern = re.compile(".*[:/]([^/]+)/([^/]+).git") +issues_api = "https://api.github.com/repos/%s/%s/issues" +traffic_api = "https://api.github.com/repos/%s/%s/traffic" +popular_api = "https://api.github.com/repos/%s/%s/popular" +rate_limit_api = "https://api.github.com/rate_limit" + + +def get_limited(url, params=None, auth=None): + """ Get a GitHub API response, keeping in mind that we may + be rate-limited by the abuse system """ + number_of_retries = 0 + resp = requests.get(url, params=params, auth=auth) + while resp.status_code == 403 and number_of_retries < 20: + js = resp.json() + # If abuse-detection kicks in, sleep it off + if "You have triggered an abuse" in js["message"]: + time.sleep(5) + number_of_retries += 1 + resp = requests.get(url, params=params, auth=auth) + else: + break + resp.raise_for_status() + return resp.json() + + +def get_tokens_left(auth=None): + """ Gets number of GitHub tokens left this hour... """ + js = get_limited(rate_limit_api, auth=auth) + tokens_left = js["rate"]["remaining"] + return tokens_left + + +def issues(source, params={}, auth=None): + local_params = {"per_page": 100, "page": 1} + local_params.update(params) + + repo_user = repo_pattern.findall(source["sourceURL"])[0] + return get_limited(issues_api % repo_user, params=local_params, auth=auth) + + +def views(source, auth=None): + repo_user = repo_pattern.findall(source["sourceURL"])[0] + return get_limited("%s/views" % (traffic_api % repo_user), auth=auth) + + +def clones(source, auth=None): + repo_user = repo_pattern.findall(source["sourceURL"])[0] + return get_limited("%s/clones" % (traffic_api % repo_user), auth=auth) + + +def referrers(source, auth=None): + repo_user = repo_pattern.findall(source["sourceURL"])[0] + return get_limited("%s/referrers" % (popular_api % repo_user), auth=auth) + + +def user(user_url, auth=None): + return get_limited(user_url, auth=auth) + + +def get_all(source, f, params={}, auth=None): + acc = [] + page = params.get("page", 1) + + while True: + time.sleep(1.5) + items = f(source, params=params, auth=auth) + if not items: + break + + acc.extend(items) + + page = page + 1 + params.update({"page": page}) + + return acc diff --git a/kibble/scanners/utils/jsonapi.py b/kibble/scanners/utils/jsonapi.py new file mode 100644 index 00000000..28fc4747 --- /dev/null +++ b/kibble/scanners/utils/jsonapi.py @@ -0,0 +1,108 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +This is a Kibble JSON API plugin. +""" +import requests +import time +import base64 + +CONNECT_TIMEOUT = 2 # Max timeout for the connect part of a request. + + +# Should be set low as it may otherwise freeze the scanner. 
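+#
+# Usage sketch (hypothetical endpoint and token; get() is defined just below):
+#
+#     js = get("https://api.example.org/items", token="abc123", timeout=30)
+#
+# A 429 response makes get() sleep for 60 seconds and retry, up to five times,
+# before giving up.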
+def get(url, cookie=None, auth=None, token=None, retries=5, timeout=30): + headers = { + "Content-type": "application/json", + "Accept": "application/json", + "User-Agent": "Apache Kibble", + } + if auth: + xcreds = auth.encode(encoding="ascii", errors="replace") + bauth = ( + base64.encodebytes(xcreds) + .decode("ascii", errors="replace") + .replace("\n", "") + ) + headers["Authorization"] = "Basic %s" % bauth + if token: + headers["Authorization"] = "token %s" % token + if cookie: + headers["Cookie"] = cookie + rv = requests.get(url, headers=headers, timeout=(CONNECT_TIMEOUT, timeout)) + # Some services may be rate limited. We'll try sleeping it off in 60 second + # intervals for a max of five minutes, then give up. + if rv.status_code == 429: + if retries > 0: + time.sleep(60) + retries -= 1 + return get( + url, + cookie=cookie, + auth=auth, + token=token, + retries=retries, + timeout=timeout, + ) + if rv.status_code < 400: + return rv.json() + raise requests.exceptions.ConnectionError( + "Could not fetch JSON, server responded with status code %u" % rv.status_code, + response=rv, + ) + + +def gettxt(url, cookie=None, auth=None): + """ Same as above, but returns as text blob """ + headers = {"Content-type": "application/json", "Accept": "*/*"} + if auth: + xcreds = auth.encode(encoding="ascii", errors="replace") + bauth = ( + base64.encodebytes(xcreds) + .decode("ascii", errors="replace") + .replace("\n", "") + ) + headers["Authorization"] = "Basic %s" % bauth + if cookie: + headers["Cookie"] = cookie + rv = requests.get(url, headers=headers) + js = rv.text + if rv.status_code != 404: + return js + return None + + +def post(url, data, cookie=None, auth=None): + headers = { + "Content-type": "application/json", + "Accept": "*/*", + "User-Agent": "Apache Kibble", + } + if auth: + xcreds = auth.encode(encoding="ascii", errors="replace") + bauth = ( + base64.encodebytes(xcreds) + .decode("ascii", errors="replace") + .replace("\n", "") + ) + headers["Authorization"] = "Basic %s" % bauth + if cookie: + headers["Cookie"] = cookie + rv = requests.post(url, headers=headers, json=data) + js = rv.json() + return js diff --git a/kibble/scanners/utils/kpe.py b/kibble/scanners/utils/kpe.py new file mode 100644 index 00000000..3589019f --- /dev/null +++ b/kibble/scanners/utils/kpe.py @@ -0,0 +1,169 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +This is an experimental key phrase extraction plugin for using +Azure/picoAPI for analyzing the key elements of an email on a list. 
This +requires an account with a text analysis service provider, and a +corresponding API section in config.yaml, as such: + +# picoAPI example: +picoapi: + key: abcdef1234567890 + +# Azure example: +azure: + apikey: abcdef1234567890 + location: westeurope + +Currently only pony mail is supported. more to come. +""" + +import re +import requests +import json + + +def trimBody(body): + """ Quick function for trimming away the fat from emails """ + # Cut away "On $date, jane doe wrote: " kind of texts + body = re.sub( + r"(((?:\r?\n|^)((on .+ wrote:[\r\n]+)|(sent from my .+)|(>+[ \t]*[^\r\n]*\r?\n[^\n]*\n*)+)+)+)", + "", + body, + flags=re.I | re.M, + ) + + # Crop out quotes + lines = body.split("\n") + body = "\n".join([x for x in lines if not x.startswith(">")]) + + # Remove hyperlinks + body = re.sub(r"[a-z]+://\S+", "", body) + + # Remove email addresses + body = re.sub(r"(<[^>]+>\s*\S+@\S+)", "", body) + body = re.sub(r"(\S+@\S+)", "", body) + return body + + +def azureKPE(KibbleBit, bodies): + """ KPE using Azure Text Analysis API """ + if "azure" in KibbleBit.config: + headers = { + "Content-Type": "application/json", + "Ocp-Apim-Subscription-Key": KibbleBit.config["azure"]["apikey"], + } + + js = {"documents": []} + + # For each body... + a = 0 + KPEs = [] + for body in bodies: + # Crop out quotes + lines = body.split("\n") + body = trimBody(body) + doc = {"language": "en", "id": str(a), "text": body} + js["documents"].append(doc) + KPEs.append({}) # placeholder for each doc, to be replaced + a += 1 + try: + rv = requests.post( + "https://%s.api.cognitive.microsoft.com/text/analytics/v2.0/keyPhrases" + % KibbleBit.config["azure"]["location"], + headers=headers, + data=json.dumps(js), + ) + jsout = rv.json() + except: + jsout = {} # borked sentiment analysis? + + if "documents" in jsout and len(jsout["documents"]) > 0: + for doc in jsout["documents"]: + KPEs[int(doc["id"])] = doc["keyPhrases"][ + :5 + ] # Replace KPEs[X] with the actual phrases, 5 first ones. + + else: + KibbleBit.pprint("Failed to analyze email body.") + print(jsout) + # Depending on price tier, Azure will return a 429 if you go too fast. + # If we see a statusCode return, let's just stop for now. + # Later scans can pick up the slack. + if "statusCode" in jsout: + KibbleBit.pprint("Possible rate limiting in place, stopping for now.") + return False + return KPEs + + +def picoKPE(KibbleBit, bodies): + """ KPE using picoAPI Text Analysis """ + if "picoapi" in KibbleBit.config: + headers = { + "Content-Type": "application/json", + "PicoAPI-Key": KibbleBit.config["picoapi"]["key"], + } + + js = {"texts": []} + + # For each body... + a = 0 + KPEs = [] + for body in bodies: + body = trimBody(body) + + doc = {"id": str(a), "body": body} + js["texts"].append(doc) + KPEs.append({}) # placeholder for each doc, to be replaced + a += 1 + try: + rv = requests.post( + "https://v1.picoapi.com/api/text/keyphrase", + headers=headers, + data=json.dumps(js), + ) + jsout = rv.json() + except: + jsout = {} # borked sentiment analysis? + + if "results" in jsout and len(jsout["results"]) > 0: + for doc in jsout["results"]: + phrases = [] + # This is a bit different than Azure, in that it has a weighting score + # So we need to just extract key phrases above a certain level. 
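+                # (Hypothetical example: scores of 0.31, 0.05 and 0.008 against
+                # the 0.02 floor below keep only the first two phrases.)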
+                # Grab up to 5 key phrases per text
+                MINIMUM_WEIGHT = 0.02
+                for element in doc["keyphrases"]:
+                    if element["score"] > MINIMUM_WEIGHT:
+                        phrases.append(element["phrase"])
+                    if len(phrases) == 5:
+                        break
+                KPEs[int(doc["id"])] = phrases  # Replace KPEs[X] with the actual phrases
+
+        else:
+            KibbleBit.pprint("Failed to analyze email body.")
+            print(jsout)
+            # 403 returned on invalid key, 429 on rate exceeded.
+            # If we see a code return, let's just stop for now.
+            # Later scans can pick up the slack.
+            if "code" in jsout:
+                KibbleBit.pprint("Possible rate limiting in place, stopping for now.")
+                return False
+    return KPEs
diff --git a/kibble/scanners/utils/sloc.py b/kibble/scanners/utils/sloc.py
new file mode 100644
index 00000000..46ab9eda
--- /dev/null
+++ b/kibble/scanners/utils/sloc.py
@@ -0,0 +1,78 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+""" This is the SLoC counter utility for Kibble """
+
+import subprocess
+import re
+import multiprocessing
+
+
+def count(path):
+    """ Count lines of Code """
+    # We determine how many cores there are, and adjust the
+    # process count based on that. Max 4 procs.
+    # (e.g. an 8-core machine is still capped at 4 cloc processes)
+    my_core_count = min((4, int(multiprocessing.cpu_count())))
+    inp = subprocess.check_output(
+        "cloc --quiet --progress-rate=0 --processes=%u %s" % (my_core_count, path),
+        shell=True,
+    ).decode("ascii", "replace")
+    m = re.search(
+        r".*Language\s+files\s+blank\s+comment\s+code[\s\S]+?-+([\s\S]+?)-+[\s\S]+?SUM:\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)",
+        inp,
+        flags=re.MULTILINE | re.UNICODE,
+    )
+    languages = {}
+    ccount = 0
+    years = 0
+    cost = 0
+    codecount = ""
+    comment = ""
+    blank = ""
+    if m:
+        lingos = m.group(1)
+        fcount = m.group(2)
+        blank = m.group(3)
+        comment = m.group(4)
+        codecount = m.group(5)
+        for lm in re.finditer(
+            r"([A-Za-z +-/0-9]+)\s+\d+\s+(\d+)\s+(\d+)\s+(\d+)", lingos
+        ):
+            lang = lm.group(1).replace(" Header", "").lower()
+            lang = re.sub(r"\s\s+", "", lang)
+            lang = re.sub(r"^[Cc]\\?/", "", lang)
+            lang = lang.replace(".", "_")
+            if len(lang) > 0:
+                C = 0
+                D = 0
+                E = 0
+                if lang in languages:
+                    C = languages[lang]["code"]
+                    D = languages[lang]["comment"]
+                    E = languages[lang]["blank"]
+                languages[lang] = {
+                    "code": int(lm.group(4)) + C,
+                    "comment": int(lm.group(3)) + D,
+                    "blank": int(lm.group(2)) + E,
+                }
+        ccount = int(codecount.replace(",", "")) + int(comment.replace(",", ""))
+        codecount = int(codecount.replace(",", ""))
+        blank = int(blank.replace(",", ""))
+        comment = int(comment.replace(",", ""))
+        # Crude effort estimate: ~3,300 lines per person-year at $72,000/year
+        years = ccount / 3300.0
+        cost = years * 72000
+    return [languages, codecount, comment, blank, years, cost]
diff --git a/kibble/scanners/utils/tone.py b/kibble/scanners/utils/tone.py
new file mode 100644
index 00000000..87e945da
--- /dev/null
+++ b/kibble/scanners/utils/tone.py
@@ -0,0 +1,197 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+This is an experimental tone analyzer plugin for using Watson/BlueMix for
+analyzing the mood of email on a list. This requires a Watson account
+and a watson section in config.yaml, as such:
+
+watson:
+    username: $user
+    password: $pass
+    api: https://$something.watsonplatform.net/tone-analyzer/api
+
+Currently only pony mail is supported. More to come.
+"""
+
+import requests
+import json
+
+
+def watsonTone(KibbleBit, bodies):
+    """ Sentiment analysis using IBM Watson """
+    if "watson" in KibbleBit.config:
+        headers = {"Content-Type": "application/json"}
+
+        # Crop out quotes
+        for body in bodies:
+            lines = body.split("\n")
+            body = "\n".join([x for x in lines if not x.startswith(">")])
+
+            js = {"text": body}
+            try:
+                rv = requests.post(
+                    "%s/v3/tone?version=2017-09-21&sentences=false"
+                    % KibbleBit.config["watson"]["api"],
+                    headers=headers,
+                    data=json.dumps(js),
+                    auth=(
+                        KibbleBit.config["watson"]["username"],
+                        KibbleBit.config["watson"]["password"],
+                    ),
+                )
+                jsout = rv.json()
+            except:
+                jsout = {}  # borked Watson?
+            mood = {}
+            if "document_tone" in jsout:
+                for tone in jsout["document_tone"]["tones"]:
+                    mood[tone["tone_id"]] = tone["score"]
+            else:
+                KibbleBit.pprint("Failed to analyze email body.")
+            yield mood
+
+
+def azureTone(KibbleBit, bodies):
+    """ Sentiment analysis using Azure Text Analysis API """
+    if "azure" in KibbleBit.config:
+        headers = {
+            "Content-Type": "application/json",
+            "Ocp-Apim-Subscription-Key": KibbleBit.config["azure"]["apikey"],
+        }
+
+        js = {"documents": []}
+
+        # For each body...
+        a = 0
+        moods = []
+        for body in bodies:
+            # Crop out quotes
+            lines = body.split("\n")
+            body = "\n".join([x for x in lines if not x.startswith(">")])
+            doc = {"language": "en", "id": str(a), "text": body}
+            js["documents"].append(doc)
+            moods.append({})  # placeholder for each doc, to be replaced
+            a += 1
+        try:
+            rv = requests.post(
+                "https://%s.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment"
+                % KibbleBit.config["azure"]["location"],
+                headers=headers,
+                data=json.dumps(js),
+            )
+            jsout = rv.json()
+        except:
+            jsout = {}  # borked sentiment analysis?
+
+        if "documents" in jsout and len(jsout["documents"]) > 0:
+            for doc in jsout["documents"]:
+                mood = {}
+                # This is more pared down than Watson, so we split it into three
+                # groups: positive, neutral and negative, using three overlapping
+                # segments: 0->40%, 25->75% and 60->100%.
+                # 0-40 promotes negative, 60-100 promotes positive, and 25-75 promotes neutral.
+                # As we don't want to over-represent negative/positive where the results are
+                # muddy, the neutral zone is larger than the positive/negative zones by 10%.
+                # (Worked example: a score of 0.2 yields negative 0.5, positive 0.0,
+                # neutral 0.4.)
+                val = doc["score"]
+                mood["negative"] = max(
+                    0, ((0.4 - val) * 2.5)
+                )  # For 40% and below, scale the distance by 2.5
+                mood["positive"] = max(
+                    0, ((val - 0.6) * 2.5)
+                )  # For 60% and above, scale the distance by 2.5
+                mood["neutral"] = max(
+                    0, 1 - (abs(val - 0.5) * 2)
+                )  # Between 25% and 75%, use double the distance to the middle.
+                moods[int(doc["id"])] = mood  # Replace moods[X] with the actual mood
+
+        else:
+            KibbleBit.pprint("Failed to analyze email body.")
+            print(jsout)
+            # Depending on price tier, Azure will return a 429 if you go too fast.
+            # If we see a statusCode return, let's just stop for now.
+            # Later scans can pick up the slack.
+            if "statusCode" in jsout:
+                KibbleBit.pprint("Possible rate limiting in place, stopping for now.")
+                return False
+        return moods
+
+
+def picoTone(KibbleBit, bodies):
+    """ Sentiment analysis using picoAPI Text Analysis """
+    if "picoapi" in KibbleBit.config:
+        headers = {
+            "Content-Type": "application/json",
+            "PicoAPI-Key": KibbleBit.config["picoapi"]["key"],
+        }
+
+        js = {"texts": []}
+
+        # For each body...
+        a = 0
+        moods = []
+        for body in bodies:
+            # Crop out quotes
+            lines = body.split("\n")
+            body = "\n".join([x for x in lines if not x.startswith(">")])
+            doc = {"id": str(a), "body": body}
+            js["texts"].append(doc)
+            moods.append({})  # placeholder for each doc, to be replaced
+            a += 1
+        try:
+            rv = requests.post(
+                "https://v1.picoapi.com/api/text/sentiment",
+                headers=headers,
+                data=json.dumps(js),
+            )
+            jsout = rv.json()
+        except:
+            jsout = {}  # borked sentiment analysis?
+
+        if "results" in jsout and len(jsout["results"]) > 0:
+            for doc in jsout["results"]:
+                mood = {}
+
+                # Use the direct Bayesian scores from picoAPI: negativity,
+                # positivity and neutrality, where neutrality favors a middle
+                # sentiment score and ignores the high/low extremes.
+                mood["negative"] = doc["negativity"]
+                mood["positive"] = doc["positivity"]
+                mood["neutral"] = doc["neutrality"]
+
+                # Additional (optional) emotion weighting
+                if "emotions" in doc:
+                    for k, v in doc["emotions"].items():
+                        mood[k] = v / 100  # Value is between 0 and 100.
+
+                moods[int(doc["id"])] = mood  # Replace moods[X] with the actual mood
+
+        else:
+            KibbleBit.pprint("Failed to analyze email body.")
+            print(jsout)
+            # 403 returned on invalid key, 429 on rate exceeded.
+            # If we see a code return, let's just stop for now.
+            # Later scans can pick up the slack.
+            if "code" in jsout:
+                KibbleBit.pprint("Possible rate limiting in place, stopping for now.")
+                return False
+        return moods
diff --git a/kibble/scanners/utils/urlmisc.py b/kibble/scanners/utils/urlmisc.py
new file mode 100644
index 00000000..8f655ba6
--- /dev/null
+++ b/kibble/scanners/utils/urlmisc.py
@@ -0,0 +1,79 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+This is a Kibble miscellaneous URL functions plugin.
+"""
+import base64
+import urllib.request
+import gzip
+import tempfile
+import io
+import subprocess
+
+
+def unzip(url, creds=None, cookie=None):
+    """ Attempts to download and unzip an archive. Returns the
+    temporary file path of the unzipped contents """
+    headers = {}
+    if creds:
+        auth = (
+            base64.encodebytes(creds.encode("ascii", errors="replace"))
+            .decode("ascii", errors="replace")
+            .replace("\n", "")
+        )
+        headers = {
+            "Content-type": "application/json",
+            "Accept": "*/*",
+            "Authorization": "Basic %s" % auth,
+        }
+    if cookie:
+        headers = {
+            "Content-type": "application/json",
+            "Accept": "*/*",
+            "Cookie": cookie,
+        }
+    request = urllib.request.Request(url, headers=headers)
+    # Try fetching via python, fall back to wget (redhat == broken!)
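+    # (The fallback below sniffs the two gzip magic bytes, 0x1f 0x8b, to decide
+    # whether wget fetched a compressed archive or a plain file.)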
+    decompressedFile = None
+    try:
+        result = urllib.request.urlopen(request)
+        compressedFile = io.BytesIO()
+        compressedFile.write(result.read())
+        compressedFile.seek(0)
+        decompressedFile = gzip.GzipFile(fileobj=compressedFile, mode="rb")
+    except urllib.error.HTTPError as err:
+        # We're not interested in 404s, only transport errors
+        if err.code != 404 and err.code != 401:
+            tmpfile = tempfile.NamedTemporaryFile(mode="w+b", buffering=1, delete=False)
+            subprocess.check_call(("/usr/bin/wget", "-O", tmpfile.name, url))
+
+            try:
+                compressedFile = open(tmpfile.name, "rb")
+                if compressedFile.read(2) == b"\x1f\x8b":
+                    compressedFile.seek(0)
+                    decompressedFile = gzip.GzipFile(fileobj=compressedFile, mode="rb")
+                else:
+                    compressedFile.close()
+                    return tmpfile.name
+            except:
+                # Probably not a gzipped file!
+                decompressedFile = open(tmpfile.name, "rb")
+    if decompressedFile:
+        tmpfile = tempfile.NamedTemporaryFile(mode="w+b", buffering=1, delete=False)
+        tmpfile.write(decompressedFile.read())
+        tmpfile.flush()
+        tmpfile.close()
+        return tmpfile.name
+    return None
diff --git a/setup.py b/setup.py
index 6c3de19a..dfa5ebdc 100644
--- a/setup.py
+++ b/setup.py
@@ -35,8 +35,11 @@
     "certifi==2020.6.20",
     "elasticsearch==7.9.1",
     "gunicorn==20.0.4",
+    "psutil==5.7.3",
     "python-dateutil==2.8.1",
+    "python-twitter==3.5",
     "PyYAML==5.3.1",
+    "requests==2.24.0",
     "tenacity==6.2.0",
 ]

From 4884cf5af74d99974e1c4df1de5eedffe14f7165 Mon Sep 17 00:00:00 2001
From: Daniel Gruno
Date: Thu, 12 Nov 2020 09:15:35 +0100
Subject: [PATCH 18/48] GitHub uses main as default branch, so we should
 obviously look for that.

---
 kibble/scanners/utils/git.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kibble/scanners/utils/git.py b/kibble/scanners/utils/git.py
index c7809193..bd712524 100644
--- a/kibble/scanners/utils/git.py
+++ b/kibble/scanners/utils/git.py
@@ -25,7 +25,7 @@
 def defaultBranch(source, datapath, KibbleBit=None):
     """ Tries to figure out what the main branch of a repo is """
-    wanted_branches = ["master", "trunk"]
+    wanted_branches = ["master", "main", "trunk"]
     branch = ""
     # If we have an override of branches we like, use 'em
     if KibbleBit and KibbleBit.config.get("git"):

From 7d70dd5644454876ce3403d945ae823449eb788e Mon Sep 17 00:00:00 2001
From: Sharvil Kekre
Date: Sat, 14 Nov 2020 09:48:42 -0800
Subject: [PATCH 19/48] Refactor setup scripts as kibble cli command (#91)

---
 docker-compose-dev.yaml                      |   2 +-
 kibble/__main__.py                           |  69 ++++++++-
 .../{setup/setup.py => cli/setup_command.py} | 136 +++++-------------
 setup.py                                     |   1 +
 4 files changed, 103 insertions(+), 105 deletions(-)
 rename kibble/{setup/setup.py => cli/setup_command.py} (66%)

diff --git a/docker-compose-dev.yaml b/docker-compose-dev.yaml
index 7744b998..3e67c369 100644
--- a/docker-compose-dev.yaml
+++ b/docker-compose-dev.yaml
@@ -7,7 +7,7 @@ services:
     build:
       context: .
       dockerfile: Dockerfile.dev
-    command: bash -c "python kibble/setup/setup.py --autoadmin --skiponexist"
+    command: bash -c "kibble setup --autoadmin --skiponexist"
     volumes:
       - .:/kibble/
     depends_on:
diff --git a/kibble/__main__.py b/kibble/__main__.py
index aea89fd1..b63d27c1 100644
--- a/kibble/__main__.py
+++ b/kibble/__main__.py
@@ -15,9 +15,76 @@
 # specific language governing permissions and limitations
 # under the License.
+import click + +from kibble.cli import setup_command +from kibble.version import version as kibble_version + +from kibble.configuration import conf + + +@click.group() +def cli(): + """A simple command line tool for kibble""" + + +@cli.command("version", short_help="displays the current kibble version") +def version(): + click.echo(kibble_version) + + +@cli.command("setup", short_help="starts the setup process for kibble") +@click.option( + "-u", + "--uri", + default=conf.get("elasticsearch", "conn_uri"), + help="connection uri for ElasticSearch", +) +@click.option( + "-d", + "--dbname", + default=conf.get("elasticsearch", "dbname"), + help="elasticsearch database prefix", +) +@click.option( + "-s", + "--shards", + default=conf.get("elasticsearch", "shards"), + help="number of ES shards", +) +@click.option( + "-r", + "--replicas", + default=conf.get("elasticsearch", "replicas"), + help="number of replicas for ES", +) +@click.option( + "-m", "--mailhost", default=conf.get("mail", "mailhost"), help="mail server host" +) +@click.option("-a", "--autoadmin", default=False, help="generate generic admin account") +@click.option("-k", "--skiponexist", default=True, help="skip DB creation if DBs exist") +def setup( + uri: str, + dbname: str, + shards: str, + replicas: str, + mailhost: str, + autoadmin: bool, + skiponexist: bool, +): + setup_command.do_setup( + uri=uri, + dbname=dbname, + shards=shards, + replicas=replicas, + mailhost=mailhost, + autoadmin=autoadmin, + skiponexist=skiponexist, + ) + def main(): - print("Hello to kibble!") + cli() if __name__ == "__main__": diff --git a/kibble/setup/setup.py b/kibble/cli/setup_command.py similarity index 66% rename from kibble/setup/setup.py rename to kibble/cli/setup_command.py index 79d67a92..842701c9 100644 --- a/kibble/setup/setup.py +++ b/kibble/cli/setup_command.py @@ -15,12 +15,12 @@ # specific language governing permissions and limitations # under the License. -import sys import os -import argparse +import sys import logging from getpass import getpass +import click import tenacity import bcrypt import json @@ -28,63 +28,15 @@ from kibble.configuration import conf - KIBBLE_VERSION = conf.get("api", "version") -KIBBLE_DB_VERSION = conf.get("api", "database") # database revision +KIBBLE_DB_VERSION = conf.get("api", "database") -if sys.version_info <= (3, 3): - print("This script requires Python 3.4 or higher") - sys.exit(-1) - -# Arguments for non-interactive setups like docker -def get_parser(): - arg_parser = argparse.ArgumentParser() - arg_parser.add_argument( - "-e", - "--conn-uri", - help="Pre-defined connection uri for ElasticSearch.", - default=conf.get("elasticsearch", "conn_uri"), - ) - arg_parser.add_argument( - "-d", - "--dbname", - help="Pre-defined Database prefix. Default: kibble", - default=conf.get("elasticsearch", "dbname"), - ) - arg_parser.add_argument( - "-s", - "--shards", - help="Predefined number of ES shards, Default: 5", - default=conf.get("elasticsearch", "shards"), - ) - arg_parser.add_argument( - "-r", - "--replicas", - help="Predefined number of replicas for ES. Default: 1", - default=conf.get("elasticsearch", "replicas"), - ) - arg_parser.add_argument( - "-m", - "--mailhost", - help="Pre-defined mail server host. Default: localhost:25", - default=conf.get("mail", "mailhost"), - ) - arg_parser.add_argument( - "-a", - "--autoadmin", - action="store_true", - help="Generate generic admin account. 
Default: False", - default=False, - ) - arg_parser.add_argument( - "-k", - "--skiponexist", - action="store_true", - help="Skip DB creation if DBs exist. Defaul: True", - default=True, - ) - return arg_parser +def get_user_input(msg: str, secure: bool = False): + value = None + while not value: + value = getpass(msg) if secure else input(msg) + return value def create_es_index( @@ -102,12 +54,13 @@ def create_es_index( logging.getLogger("elasticsearch").setLevel(logging.ERROR) mappings_json = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "mappings.json" + os.path.dirname(os.path.realpath(__file__)), "../setup/mappings.json" ) with open(mappings_json, "r") as f: mappings = json.load(f) es = Elasticsearch([conn_uri], max_retries=5, retry_on_timeout=True) + print(es.info()) es_version = es.info()["version"]["number"] es6 = int(es_version.split(".")[0]) >= 6 @@ -187,7 +140,7 @@ def create_es_index( es.indices.create( index=iname, body={"mappings": mappings["mappings"], "settings": settings} ) - print(f"Indices created!") + print(f"Indices created!\n") print() salt = bcrypt.gensalt() @@ -212,49 +165,30 @@ def create_es_index( print("Account created!") -def get_user_input(msg: str, secure: bool = False): - value = None - while not value: - value = getpass(msg) if secure else input(msg) - return value - - -def print_configuration(args): - print( - "Configuring Apache Kibble elasticsearch instance with the following arguments:" - ) - print(f"- conn_uri: {args.conn_uri}") - print(f"- dbname: {args.dbname}") - print(f"- shards: {int(args.shards)}") - print(f"- replicas: {int(args.replicas)}") - print() - - -def main(): - """ - The main Kibble setup logic. Using users input we create: - - Elasticsearch indexes used by Apache Kibble app - - Configuration yaml file - """ - parser = get_parser() - args = parser.parse_args() - - print("Welcome to the Apache Kibble setup script!") - print_configuration(args) +def do_setup( + uri: str, + dbname: str, + shards: str, + replicas: str, + mailhost: str, + autoadmin: bool, + skiponexist: bool, +): + click.echo("Welcome to the Apache Kibble setup script!") admin_name = "admin@kibble" admin_pass = "kibbleAdmin" - if not args.autoadmin: + if not autoadmin: admin_name = get_user_input( - "Enter an email address for the administrator account:" + "Enter an email address for the administrator account: " ) admin_pass = get_user_input( - "Enter a password for the administrator account:", secure=True + "Enter a password for the administrator account: ", secure=True ) # Create Elasticsearch index # Retry in case ES is not yet up - print(f"Elasticsearch: {args.conn_uri}") + click.echo(f"Elasticsearch: {uri}") for attempt in tenacity.Retrying( retry=tenacity.retry_if_exception_type(exception_types=Exception), wait=tenacity.wait_fixed(10), @@ -262,19 +196,15 @@ def main(): reraise=True, ): with attempt: - print("Trying to create ES index...") + click.echo("Trying to create ES index...") create_es_index( - conn_uri=args.conn_uri, - dbname=args.dbname, - shards=int(args.shards), - replicas=int(args.replicas), + conn_uri=uri, + dbname=dbname, + shards=int(shards), + replicas=int(replicas), admin_name=admin_name, admin_pass=admin_pass, - skiponexist=args.skiponexist, + skiponexist=skiponexist, ) - print() - print("All done, Kibble should...work now :)") - - -if __name__ == "__main__": - main() + click.echo() + click.echo("All done, Kibble should...work now :)") diff --git a/setup.py b/setup.py index dfa5ebdc..568b3162 100644 --- a/setup.py +++ b/setup.py @@ -33,6 
+33,7 @@ INSTALL_REQUIREMENTS = [ "bcrypt==3.2.0", "certifi==2020.6.20", + "click==7.1.2", "elasticsearch==7.9.1", "gunicorn==20.0.4", "psutil==5.7.3", From 3d03a2446de99e4a3dd480508b61a2d4f7746cf5 Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Sat, 14 Nov 2020 23:28:28 +0100 Subject: [PATCH 20/48] Build docker image on CI (#92) To check that kibble works we can at least build the docker image, run kibble --help and validate python requirements. --- .github/labeler.yml | 6 ++++++ .github/workflows/ci.yaml | 15 +++++++++++++-- .github/workflows/labeler.yaml | 3 ++- setup.py | 2 +- 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/.github/labeler.yml b/.github/labeler.yml index fff54b4c..299dda74 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,6 +1,12 @@ area:api: - 'kibble/api/*' +area:cli: + - 'kibble/cli/*' + +area:scanners: + - 'kibble/scanners/*' + area:ui: - 'ui/*' diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index f7f27c0f..fc571d8b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -22,9 +22,20 @@ on: jobs: statics: - name: Static Checks + name: Static checks runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - - uses: actions/setup-python@v1 + - uses: actions/setup-python@v2 - uses: pre-commit/action@v1.0.1 + build-docker: + name: Build kibble dev image + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Build docker image + run: docker-compose -f docker-compose-dev.yaml build setup + - name: Run kibble command + run: docker run apache/kibble kibble --help + - name: Check dependencies + run: docker run apache/kibble pip check diff --git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml index 4415f001..103e3e44 100644 --- a/.github/workflows/labeler.yaml +++ b/.github/workflows/labeler.yaml @@ -1,9 +1,10 @@ -name: "Pull Request Labeler" +name: "PR labeler" on: - pull_request_target jobs: triage: + name: Label runs-on: ubuntu-latest steps: - uses: actions/labeler@main diff --git a/setup.py b/setup.py index 568b3162..6262a087 100644 --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ "python-dateutil==2.8.1", "python-twitter==3.5", "PyYAML==5.3.1", - "requests==2.24.0", + "requests==2.25.0", "tenacity==6.2.0", ] From 639f10c260dcb3d9f2fa1098d4ee0ca30ecc9aeb Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Tue, 17 Nov 2020 10:36:03 +0100 Subject: [PATCH 21/48] Add isort pre-commit hook (#96) --- .pre-commit-config.yaml | 7 +++++++ kibble/__main__.py | 3 +-- kibble/api/handler.py | 6 +++--- kibble/api/pages/account.py | 7 ++++--- kibble/api/pages/bio/bio.py | 2 +- kibble/api/pages/bio/newtimers.py | 2 +- kibble/api/pages/ci/queue.py | 2 +- kibble/api/pages/ci/status.py | 2 +- kibble/api/pages/ci/top-buildcount.py | 2 +- kibble/api/pages/ci/top-buildtime.py | 2 +- kibble/api/pages/code/commits.py | 2 +- kibble/api/pages/code/committers.py | 2 +- kibble/api/pages/code/evolution.py | 2 +- kibble/api/pages/code/pony-timeseries.py | 5 +++-- kibble/api/pages/code/pony.py | 2 +- kibble/api/pages/code/punchcard.py | 2 +- kibble/api/pages/code/relationships.py | 8 ++++---- kibble/api/pages/code/retention.py | 4 ++-- kibble/api/pages/code/top-commits.py | 2 +- kibble/api/pages/code/top-sloc.py | 2 +- kibble/api/pages/forum/actors.py | 2 +- kibble/api/pages/forum/creators.py | 2 +- kibble/api/pages/forum/issues.py | 3 ++- kibble/api/pages/forum/responders.py | 2 +- kibble/api/pages/forum/top-count.py | 2 +- kibble/api/pages/forum/top.py | 2 +- kibble/api/pages/issue/actors.py 
| 2 +- kibble/api/pages/issue/age.py | 2 +- kibble/api/pages/issue/closers.py | 2 +- kibble/api/pages/issue/issues.py | 3 ++- kibble/api/pages/issue/openers.py | 2 +- kibble/api/pages/issue/pony-timeseries.py | 5 +++-- kibble/api/pages/issue/relationships.py | 8 ++++---- kibble/api/pages/issue/retention.py | 4 ++-- kibble/api/pages/issue/top-count.py | 2 +- kibble/api/pages/issue/top.py | 2 +- kibble/api/pages/mail/keyphrases.py | 2 +- kibble/api/pages/mail/map.py | 8 ++++---- kibble/api/pages/mail/pony-timeseries.py | 5 +++-- kibble/api/pages/mail/relationships.py | 8 ++++---- kibble/api/pages/mail/retention.py | 4 ++-- kibble/api/pages/mail/timeseries-single.py | 2 +- kibble/api/pages/mail/timeseries.py | 2 +- kibble/api/pages/mail/top-authors.py | 4 ++-- kibble/api/pages/mail/top-topics.py | 2 +- kibble/api/pages/mail/trends.py | 2 +- kibble/api/pages/org/contributors.py | 2 +- kibble/api/pages/org/members.py | 2 +- kibble/api/pages/org/sourcetypes.py | 2 +- kibble/api/pages/session.py | 5 +++-- kibble/api/pages/sources.py | 3 ++- kibble/api/pages/views.py | 2 +- kibble/api/pages/widgets.py | 2 +- kibble/api/plugins/openapi.py | 5 +++-- kibble/api/plugins/session.py | 4 ++-- kibble/api/yaml/openapi/combine.py | 5 +++-- kibble/cli/setup_command.py | 6 +++--- kibble/scanners/brokers/kibbleES.py | 3 ++- kibble/scanners/kibble-scanner.py | 7 ++++--- kibble/scanners/scanners/bugzilla.py | 9 +++++---- kibble/scanners/scanners/buildbot.py | 4 ++-- kibble/scanners/scanners/discourse.py | 6 +++--- kibble/scanners/scanners/gerrit.py | 7 ++++--- kibble/scanners/scanners/git-census.py | 8 ++++---- kibble/scanners/scanners/git-evolution.py | 6 +++--- kibble/scanners/scanners/git-sloc.py | 2 +- kibble/scanners/scanners/github-issues.py | 5 +++-- kibble/scanners/scanners/jenkins.py | 4 ++-- kibble/scanners/scanners/jira.py | 5 +++-- kibble/scanners/scanners/pipermail.py | 12 ++++++------ kibble/scanners/scanners/ponymail-kpe.py | 2 +- kibble/scanners/scanners/ponymail-tone.py | 2 +- kibble/scanners/scanners/ponymail.py | 6 +++--- kibble/scanners/scanners/travis.py | 5 +++-- kibble/scanners/scanners/twitter.py | 3 ++- kibble/scanners/utils/git.py | 4 ++-- kibble/scanners/utils/github.py | 3 ++- kibble/scanners/utils/jsonapi.py | 5 +++-- kibble/scanners/utils/kpe.py | 3 ++- kibble/scanners/utils/sloc.py | 4 ++-- kibble/scanners/utils/tone.py | 3 ++- kibble/scanners/utils/urlmisc.py | 4 ++-- kibble/setup/makeaccount.py | 11 +++++++---- 83 files changed, 176 insertions(+), 145 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 889d6237..c80ddd9f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -74,3 +74,10 @@ repos: - id: black name: Black types: [python] + - repo: https://github.com/timothycrosley/isort + rev: 5.6.4 + hooks: + - id: isort + name: Run isort to sort imports + args: ['--multi-line', '3', '--trailing-comma'] + files: \.py$ diff --git a/kibble/__main__.py b/kibble/__main__.py index b63d27c1..2807c35b 100644 --- a/kibble/__main__.py +++ b/kibble/__main__.py @@ -18,9 +18,8 @@ import click from kibble.cli import setup_command -from kibble.version import version as kibble_version - from kibble.configuration import conf +from kibble.version import version as kibble_version @click.group() diff --git a/kibble/api/handler.py b/kibble/api/handler.py index 798e6685..d409b54d 100644 --- a/kibble/api/handler.py +++ b/kibble/api/handler.py @@ -21,21 +21,21 @@ and if a URL matches it runs the specific submodule's run() function. 
It also handles CGI parsing and exceptions in the applications. """ +import json import os import re import sys import traceback + import yaml -import json from kibble.api.plugins import openapi from kibble.api.plugins.database import KibbleDatabase from kibble.api.plugins.session import KibbleSession - +from kibble.settings import KIBBLE_YAML, YAML_DIRECTORY # Compile valid API URLs from the pages library # Allow backwards compatibility by also accepting .lua URLs -from kibble.settings import KIBBLE_YAML, YAML_DIRECTORY urls = [] if __name__ != "__main__": diff --git a/kibble/api/pages/account.py b/kibble/api/pages/account.py index 196d06a7..3b4c1193 100644 --- a/kibble/api/pages/account.py +++ b/kibble/api/pages/account.py @@ -95,13 +95,14 @@ adds, removes and edits accounts. """ +import email.message +import hashlib import json import re +import smtplib import time + import bcrypt -import hashlib -import smtplib -import email.message def sendCode(session, addr, code): diff --git a/kibble/api/pages/bio/bio.py b/kibble/api/pages/bio/bio.py index 32f62c42..9e7a60b5 100644 --- a/kibble/api/pages/bio/bio.py +++ b/kibble/api/pages/bio/bio.py @@ -65,9 +65,9 @@ This is the contributor trends renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/bio/newtimers.py b/kibble/api/pages/bio/newtimers.py index 57b590a9..b07cc504 100644 --- a/kibble/api/pages/bio/newtimers.py +++ b/kibble/api/pages/bio/newtimers.py @@ -65,9 +65,9 @@ This is the newtimers list renderer for Kibble """ +import hashlib import json import time -import hashlib def find_earlier(session, query, when, who, which, where, doctype, dOrg): diff --git a/kibble/api/pages/ci/queue.py b/kibble/api/pages/ci/queue.py index d539bee8..d01e51fd 100644 --- a/kibble/api/pages/ci/queue.py +++ b/kibble/api/pages/ci/queue.py @@ -65,9 +65,9 @@ This is the CI queue timeseries renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/ci/status.py b/kibble/api/pages/ci/status.py index 954c4612..c89ae14d 100644 --- a/kibble/api/pages/ci/status.py +++ b/kibble/api/pages/ci/status.py @@ -65,9 +65,9 @@ This is the CI queue status (blocked/stuck) timeseries renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/ci/top-buildcount.py b/kibble/api/pages/ci/top-buildcount.py index 52a59e90..30012b09 100644 --- a/kibble/api/pages/ci/top-buildcount.py +++ b/kibble/api/pages/ci/top-buildcount.py @@ -66,8 +66,8 @@ """ import json -import time import re +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/ci/top-buildtime.py b/kibble/api/pages/ci/top-buildtime.py index f3a8c800..25005307 100644 --- a/kibble/api/pages/ci/top-buildtime.py +++ b/kibble/api/pages/ci/top-buildtime.py @@ -66,8 +66,8 @@ """ import json -import time import re +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/code/commits.py b/kibble/api/pages/code/commits.py index c5377df5..cc0bf511 100644 --- a/kibble/api/pages/code/commits.py +++ b/kibble/api/pages/code/commits.py @@ -65,9 +65,9 @@ This is the TopN committers list renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/code/committers.py b/kibble/api/pages/code/committers.py index 4bc0d36d..3add5ed0 
100644 --- a/kibble/api/pages/code/committers.py +++ b/kibble/api/pages/code/committers.py @@ -65,9 +65,9 @@ This is the TopN committers list renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/code/evolution.py b/kibble/api/pages/code/evolution.py index 0fa9cdc5..e9b59160 100644 --- a/kibble/api/pages/code/evolution.py +++ b/kibble/api/pages/code/evolution.py @@ -65,9 +65,9 @@ This is the TopN committers list renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/code/pony-timeseries.py b/kibble/api/pages/code/pony-timeseries.py index 8e1d254e..cff42df8 100644 --- a/kibble/api/pages/code/pony-timeseries.py +++ b/kibble/api/pages/code/pony-timeseries.py @@ -65,10 +65,11 @@ This is the pony factor renderer for Kibble """ +import datetime import json -import time import re -import datetime +import time + import dateutil.relativedelta diff --git a/kibble/api/pages/code/pony.py b/kibble/api/pages/code/pony.py index 2c5b48d2..68f648ad 100644 --- a/kibble/api/pages/code/pony.py +++ b/kibble/api/pages/code/pony.py @@ -66,8 +66,8 @@ """ import json -import time import re +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/code/punchcard.py b/kibble/api/pages/code/punchcard.py index f588b3b8..1a99a438 100644 --- a/kibble/api/pages/code/punchcard.py +++ b/kibble/api/pages/code/punchcard.py @@ -65,9 +65,9 @@ This is the commit punch-card renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/code/relationships.py b/kibble/api/pages/code/relationships.py index 43b1a9e3..a3944a76 100644 --- a/kibble/api/pages/code/relationships.py +++ b/kibble/api/pages/code/relationships.py @@ -65,12 +65,12 @@ This is the committer relationship list renderer for Kibble """ -import json -import time -import hashlib import copy -import re +import hashlib +import json import math +import re +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/code/retention.py b/kibble/api/pages/code/retention.py index 0ae308a1..9d4fde1b 100644 --- a/kibble/api/pages/code/retention.py +++ b/kibble/api/pages/code/retention.py @@ -65,10 +65,10 @@ This is the code contributor retention factor renderer for Kibble """ +import datetime import json -import time import re -import datetime +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/code/top-commits.py b/kibble/api/pages/code/top-commits.py index 9cda87af..6d031d5e 100644 --- a/kibble/api/pages/code/top-commits.py +++ b/kibble/api/pages/code/top-commits.py @@ -66,8 +66,8 @@ """ import json -import time import re +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/code/top-sloc.py b/kibble/api/pages/code/top-sloc.py index 6950ca0f..eeb3af77 100644 --- a/kibble/api/pages/code/top-sloc.py +++ b/kibble/api/pages/code/top-sloc.py @@ -66,8 +66,8 @@ """ import json -import time import re +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/forum/actors.py b/kibble/api/pages/forum/actors.py index a9b1d18f..2962dc44 100644 --- a/kibble/api/pages/forum/actors.py +++ b/kibble/api/pages/forum/actors.py @@ -65,9 +65,9 @@ This is the forum actors stats page for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): 
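The import hunks throughout this patch apply the grouping that the isort hook added above enforces: standard-library modules first, then third-party packages, then first-party kibble modules, each group alphabetized and separated by a single blank line ('--multi-line 3' selects isort's vertical-hanging-indent wrapping style, and '--trailing-comma' keeps wrapped imports Black-compatible). A minimal sketch of the layout the hook produces, with illustrative module names only:

    # standard library first, alphabetized
    import json
    import time

    # third-party packages after one blank line
    import yaml

    # first-party (kibble) imports last
    from kibble.settings import YAML_DIRECTORY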
diff --git a/kibble/api/pages/forum/creators.py b/kibble/api/pages/forum/creators.py index 1a9e8b6d..32f636d3 100644 --- a/kibble/api/pages/forum/creators.py +++ b/kibble/api/pages/forum/creators.py @@ -65,9 +65,9 @@ This is the TopN issue openers list renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/forum/issues.py b/kibble/api/pages/forum/issues.py index 8c4bbe8f..e08c2ae0 100644 --- a/kibble/api/pages/forum/issues.py +++ b/kibble/api/pages/forum/issues.py @@ -65,9 +65,10 @@ This is the forum timeseries renderer for Kibble """ +import hashlib import json import time -import hashlib + # This creates an empty timeseries object with # all categories initialized as 0 opened, 0 closed. diff --git a/kibble/api/pages/forum/responders.py b/kibble/api/pages/forum/responders.py index a25481db..b314fcd2 100644 --- a/kibble/api/pages/forum/responders.py +++ b/kibble/api/pages/forum/responders.py @@ -65,9 +65,9 @@ This is the TopN forum posters list renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/forum/top-count.py b/kibble/api/pages/forum/top-count.py index 35947b08..c7a48941 100644 --- a/kibble/api/pages/forum/top-count.py +++ b/kibble/api/pages/forum/top-count.py @@ -66,8 +66,8 @@ """ import json -import time import re +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/forum/top.py b/kibble/api/pages/forum/top.py index 7775a17a..13005dc2 100644 --- a/kibble/api/pages/forum/top.py +++ b/kibble/api/pages/forum/top.py @@ -65,9 +65,9 @@ This is the issue actors stats page for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/issue/actors.py b/kibble/api/pages/issue/actors.py index 29308fda..39e1e8e2 100644 --- a/kibble/api/pages/issue/actors.py +++ b/kibble/api/pages/issue/actors.py @@ -65,9 +65,9 @@ This is the issue actors stats page for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/issue/age.py b/kibble/api/pages/issue/age.py index d5d98691..b51beedb 100644 --- a/kibble/api/pages/issue/age.py +++ b/kibble/api/pages/issue/age.py @@ -65,9 +65,9 @@ This is the issue actors stats page for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/issue/closers.py b/kibble/api/pages/issue/closers.py index 94a409a7..79ad41ab 100644 --- a/kibble/api/pages/issue/closers.py +++ b/kibble/api/pages/issue/closers.py @@ -65,9 +65,9 @@ This is the TopN issue closers list renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/issue/issues.py b/kibble/api/pages/issue/issues.py index bdc6ae05..0a59778f 100644 --- a/kibble/api/pages/issue/issues.py +++ b/kibble/api/pages/issue/issues.py @@ -65,9 +65,10 @@ This is the issue timeseries renderer for Kibble """ +import hashlib import json import time -import hashlib + # This creates an empty timeseries object with # all categories initialized as 0 opened, 0 closed. 
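The comment closing the hunk above describes the accumulator the issue and forum timeseries renderers start from. A hypothetical sketch of the shape it implies; the category names are invented and the real field layout may differ:

    def empty_timeseries(categories):
        # every category starts at 0 opened, 0 closed
        return {cat: {"opened": 0, "closed": 0} for cat in categories}

    empty_timeseries(["bug", "feature"])
    # {'bug': {'opened': 0, 'closed': 0}, 'feature': {'opened': 0, 'closed': 0}}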
diff --git a/kibble/api/pages/issue/openers.py b/kibble/api/pages/issue/openers.py index cee81abb..66d6ab54 100644 --- a/kibble/api/pages/issue/openers.py +++ b/kibble/api/pages/issue/openers.py @@ -65,9 +65,9 @@ This is the TopN issue openers list renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/issue/pony-timeseries.py b/kibble/api/pages/issue/pony-timeseries.py index 1c1b2b8c..46c1645d 100644 --- a/kibble/api/pages/issue/pony-timeseries.py +++ b/kibble/api/pages/issue/pony-timeseries.py @@ -65,10 +65,11 @@ This is the pony factor renderer for Kibble """ +import datetime import json -import time import re -import datetime +import time + import dateutil.relativedelta diff --git a/kibble/api/pages/issue/relationships.py b/kibble/api/pages/issue/relationships.py index 3b4985b8..74d784c7 100644 --- a/kibble/api/pages/issue/relationships.py +++ b/kibble/api/pages/issue/relationships.py @@ -65,12 +65,12 @@ This is the issue tracker relationship list renderer for Kibble """ -import json -import time -import hashlib import copy -import re +import hashlib +import json import math +import re +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/issue/retention.py b/kibble/api/pages/issue/retention.py index 3766e67d..94d35d63 100644 --- a/kibble/api/pages/issue/retention.py +++ b/kibble/api/pages/issue/retention.py @@ -67,10 +67,10 @@ This is the code contributor retention factor renderer for Kibble """ +import datetime import json -import time import re -import datetime +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/issue/top-count.py b/kibble/api/pages/issue/top-count.py index 38273cee..9fa39f65 100644 --- a/kibble/api/pages/issue/top-count.py +++ b/kibble/api/pages/issue/top-count.py @@ -66,8 +66,8 @@ """ import json -import time import re +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/issue/top.py b/kibble/api/pages/issue/top.py index 70fae4d7..17851766 100644 --- a/kibble/api/pages/issue/top.py +++ b/kibble/api/pages/issue/top.py @@ -65,9 +65,9 @@ This is the issue actors stats page for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/mail/keyphrases.py b/kibble/api/pages/mail/keyphrases.py index ead6d4bf..367304e4 100644 --- a/kibble/api/pages/mail/keyphrases.py +++ b/kibble/api/pages/mail/keyphrases.py @@ -65,9 +65,9 @@ This is the common key phrases renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/mail/map.py b/kibble/api/pages/mail/map.py index ba188ea1..8f906cc9 100644 --- a/kibble/api/pages/mail/map.py +++ b/kibble/api/pages/mail/map.py @@ -65,12 +65,12 @@ This is the committer relationship list renderer for Kibble """ -import json -import time -import hashlib import copy -import re +import hashlib +import json import math +import re +import time badBots = r"(JIRA|Hudson|jira|jenkins|GitHub|git@|dev@|bugzilla|gerrit)" diff --git a/kibble/api/pages/mail/pony-timeseries.py b/kibble/api/pages/mail/pony-timeseries.py index 578ae70c..bfa31d08 100644 --- a/kibble/api/pages/mail/pony-timeseries.py +++ b/kibble/api/pages/mail/pony-timeseries.py @@ -65,10 +65,11 @@ This is the pony factor renderer for Kibble """ +import datetime import json -import time import re -import datetime +import time + import 
dateutil.relativedelta diff --git a/kibble/api/pages/mail/relationships.py b/kibble/api/pages/mail/relationships.py index 69e3df4a..70fd7c3d 100644 --- a/kibble/api/pages/mail/relationships.py +++ b/kibble/api/pages/mail/relationships.py @@ -65,12 +65,12 @@ This is the committer relationship list renderer for Kibble """ -import json -import time -import hashlib import copy -import re +import hashlib +import json import math +import re +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/mail/retention.py b/kibble/api/pages/mail/retention.py index dd54a7b3..57f26d92 100644 --- a/kibble/api/pages/mail/retention.py +++ b/kibble/api/pages/mail/retention.py @@ -67,10 +67,10 @@ This is the code contributor retention factor renderer for Kibble """ +import datetime import json -import time import re -import datetime +import time def run(API, environ, indata, session): diff --git a/kibble/api/pages/mail/timeseries-single.py b/kibble/api/pages/mail/timeseries-single.py index d615715c..51f3cd8a 100644 --- a/kibble/api/pages/mail/timeseries-single.py +++ b/kibble/api/pages/mail/timeseries-single.py @@ -66,9 +66,9 @@ unlike timeseries.py, this only shows mail sent, not topics or authors. """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/mail/timeseries.py b/kibble/api/pages/mail/timeseries.py index 639d728d..a5942c47 100644 --- a/kibble/api/pages/mail/timeseries.py +++ b/kibble/api/pages/mail/timeseries.py @@ -65,9 +65,9 @@ This is the email timeseries renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/mail/top-authors.py b/kibble/api/pages/mail/top-authors.py index d2d14c1f..cad69b8c 100644 --- a/kibble/api/pages/mail/top-authors.py +++ b/kibble/api/pages/mail/top-authors.py @@ -65,10 +65,10 @@ This is the TopN committers list renderer for Kibble """ -import json -import time import hashlib +import json import re +import time ROBITS = r"(git|jira|jenkins|gerrit)@" diff --git a/kibble/api/pages/mail/top-topics.py b/kibble/api/pages/mail/top-topics.py index 7696736e..ce91c849 100644 --- a/kibble/api/pages/mail/top-topics.py +++ b/kibble/api/pages/mail/top-topics.py @@ -65,9 +65,9 @@ This is the TopN committers list renderer for Kibble """ +import hashlib import json import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/mail/trends.py b/kibble/api/pages/mail/trends.py index 5348bc36..2ddf22af 100644 --- a/kibble/api/pages/mail/trends.py +++ b/kibble/api/pages/mail/trends.py @@ -65,9 +65,9 @@ This is the Email trends renderer for Kibble """ +import datetime import json import time -import datetime def run(API, environ, indata, session): diff --git a/kibble/api/pages/org/contributors.py b/kibble/api/pages/org/contributors.py index 632bc71f..a71dd299 100644 --- a/kibble/api/pages/org/contributors.py +++ b/kibble/api/pages/org/contributors.py @@ -48,9 +48,9 @@ This is the contributor list renderer for Kibble """ +import hashlib import json import time -import hashlib cached_people = {} # Store people we know, so we don't have to fetch them again. 
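The module-level `cached_people` dict above is a plain memoization cache. A short sketch of the idiom the comment describes, assuming a hypothetical `fetch_person` helper:

    cached_people = {}

    def get_person(email):
        # only hit the backend the first time we see an address
        if email not in cached_people:
            cached_people[email] = fetch_person(email)  # hypothetical lookup
        return cached_people[email]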
diff --git a/kibble/api/pages/org/members.py b/kibble/api/pages/org/members.py index 79641b64..f890db33 100644 --- a/kibble/api/pages/org/members.py +++ b/kibble/api/pages/org/members.py @@ -115,9 +115,9 @@ This is the Org list renderer for Kibble """ +import hashlib import json import time -import hashlib def canInvite(session): diff --git a/kibble/api/pages/org/sourcetypes.py b/kibble/api/pages/org/sourcetypes.py index df4dab94..e7f8eed3 100644 --- a/kibble/api/pages/org/sourcetypes.py +++ b/kibble/api/pages/org/sourcetypes.py @@ -64,10 +64,10 @@ """ This is the source types handler for Kibble """ +import json import os import yaml -import json from kibble.settings import YAML_DIRECTORY diff --git a/kibble/api/pages/session.py b/kibble/api/pages/session.py index 225927bf..91e21a9c 100644 --- a/kibble/api/pages/session.py +++ b/kibble/api/pages/session.py @@ -94,13 +94,14 @@ This is the user session handler for Kibble """ +import hashlib import json import re import time -import bcrypt -import hashlib import uuid +import bcrypt + def run(API, environ, indata, session): diff --git a/kibble/api/pages/sources.py b/kibble/api/pages/sources.py index fc8923ca..851182b6 100644 --- a/kibble/api/pages/sources.py +++ b/kibble/api/pages/sources.py @@ -123,11 +123,12 @@ This is the source list handler for Kibble """ +import hashlib import json import os import re import time -import hashlib + import yaml from kibble.settings import YAML_DIRECTORY diff --git a/kibble/api/pages/views.py b/kibble/api/pages/views.py index a70607bb..4b0a144e 100644 --- a/kibble/api/pages/views.py +++ b/kibble/api/pages/views.py @@ -137,10 +137,10 @@ This is the views (filters) list handler for Kibble """ +import hashlib import json import re import time -import hashlib def run(API, environ, indata, session): diff --git a/kibble/api/pages/widgets.py b/kibble/api/pages/widgets.py index 35a7c963..a7cd843a 100644 --- a/kibble/api/pages/widgets.py +++ b/kibble/api/pages/widgets.py @@ -46,10 +46,10 @@ """ This is the widget design handler for Kibble """ +import json import os import yaml -import json from kibble.settings import YAML_DIRECTORY diff --git a/kibble/api/plugins/openapi.py b/kibble/api/plugins/openapi.py index 6265fc29..fa500b95 100644 --- a/kibble/api/plugins/openapi.py +++ b/kibble/api/plugins/openapi.py @@ -21,12 +21,13 @@ https://github.com/OAI/OpenAPI-Specification (a simplified version, ahem) """ -import yaml -import json import functools +import json import operator import re +import yaml + class OpenAPIException(Exception): def __init__(self, message): diff --git a/kibble/api/plugins/session.py b/kibble/api/plugins/session.py index 035fb76e..282b3f3d 100644 --- a/kibble/api/plugins/session.py +++ b/kibble/api/plugins/session.py @@ -20,10 +20,10 @@ It handles setting/getting cookies and user prefs """ -import re import http.cookies -import uuid +import re import time +import uuid class KibbleSession(object): diff --git a/kibble/api/yaml/openapi/combine.py b/kibble/api/yaml/openapi/combine.py index da618280..0b04380a 100644 --- a/kibble/api/yaml/openapi/combine.py +++ b/kibble/api/yaml/openapi/combine.py @@ -15,10 +15,11 @@ # specific language governing permissions and limitations # under the License. 
-import yaml import os -import sys import re +import sys + +import yaml from kibble.settings import YAML_DIRECTORY diff --git a/kibble/cli/setup_command.py b/kibble/cli/setup_command.py index 842701c9..a88b7abe 100644 --- a/kibble/cli/setup_command.py +++ b/kibble/cli/setup_command.py @@ -15,15 +15,15 @@ # specific language governing permissions and limitations # under the License. +import json +import logging import os import sys -import logging from getpass import getpass +import bcrypt import click import tenacity -import bcrypt -import json from elasticsearch import Elasticsearch from kibble.configuration import conf diff --git a/kibble/scanners/brokers/kibbleES.py b/kibble/scanners/brokers/kibbleES.py index 2e1d7c42..c061e307 100644 --- a/kibble/scanners/brokers/kibbleES.py +++ b/kibble/scanners/brokers/kibbleES.py @@ -15,9 +15,10 @@ # specific language governing permissions and limitations # under the License. +import sys + import elasticsearch import elasticsearch.helpers -import sys KIBBLE_DB_VERSION = 2 # Current DB struct version ACCEPTED_DB_VERSIONS = [1, 2] # Versions we know how to work with. diff --git a/kibble/scanners/kibble-scanner.py b/kibble/scanners/kibble-scanner.py index 953505d6..52b2954c 100644 --- a/kibble/scanners/kibble-scanner.py +++ b/kibble/scanners/kibble-scanner.py @@ -15,14 +15,15 @@ # specific language governing permissions and limitations # under the License. +import argparse +import multiprocessing import os import threading -import multiprocessing +import time from pprint import pprint import yaml -import time -import argparse + from kibble.scanners import scanners from kibble.scanners.brokers import kibbleES diff --git a/kibble/scanners/scanners/bugzilla.py b/kibble/scanners/scanners/bugzilla.py index e45a69e5..b0eb19d4 100644 --- a/kibble/scanners/scanners/bugzilla.py +++ b/kibble/scanners/scanners/bugzilla.py @@ -17,13 +17,14 @@ """ This is the BugZilla scanner plugin for Kible """ -import re +import hashlib import json +import re import time -import hashlib -from threading import Thread, Lock -from kibble.scanners.utils import jsonapi import urllib +from threading import Lock, Thread + +from kibble.scanners.utils import jsonapi title = "Scanner for BugZilla" version = "0.1.0" diff --git a/kibble/scanners/scanners/buildbot.py b/kibble/scanners/scanners/buildbot.py index 8a89e331..e4f7b2d5 100644 --- a/kibble/scanners/scanners/buildbot.py +++ b/kibble/scanners/scanners/buildbot.py @@ -15,11 +15,11 @@ # specific language governing permissions and limitations # under the License. -import time import datetime -import re import hashlib +import re import threading +import time from kibble.scanners.utils import jsonapi diff --git a/kibble/scanners/scanners/discourse.py b/kibble/scanners/scanners/discourse.py index 8ed1159e..eeb15de7 100644 --- a/kibble/scanners/scanners/discourse.py +++ b/kibble/scanners/scanners/discourse.py @@ -15,12 +15,12 @@ # specific language governing permissions and limitations # under the License. -import time import datetime -import re import hashlib -import threading import os +import re +import threading +import time from kibble.scanners.utils import jsonapi diff --git a/kibble/scanners/scanners/gerrit.py b/kibble/scanners/scanners/gerrit.py index 70f0b90e..ebd15a9e 100644 --- a/kibble/scanners/scanners/gerrit.py +++ b/kibble/scanners/scanners/gerrit.py @@ -15,12 +15,13 @@ # specific language governing permissions and limitations # under the License. 
+import hashlib +import json import re +import time + import requests -import hashlib from dateutil import parser -import time -import json title = "Scanner for Gerrit Code Review" version = "0.1.1" diff --git a/kibble/scanners/scanners/git-census.py b/kibble/scanners/scanners/git-census.py index 70cbdbc2..16ac9cc9 100644 --- a/kibble/scanners/scanners/git-census.py +++ b/kibble/scanners/scanners/git-census.py @@ -15,14 +15,14 @@ # specific language governing permissions and limitations # under the License. +import datetime +import email.utils +import hashlib import os import re import subprocess -import time import tempfile -import hashlib -import email.utils -import datetime, time +import time title = "Census Scanner for Git" version = "0.1.0" diff --git a/kibble/scanners/scanners/git-evolution.py b/kibble/scanners/scanners/git-evolution.py index 6ce2fe97..6c842436 100644 --- a/kibble/scanners/scanners/git-evolution.py +++ b/kibble/scanners/scanners/git-evolution.py @@ -16,12 +16,12 @@ # under the License. """ Git Evolution scanner """ -import os -import subprocess -import time import calendar import datetime import hashlib +import os +import subprocess +import time from kibble.scanners.utils import sloc diff --git a/kibble/scanners/scanners/git-sloc.py b/kibble/scanners/scanners/git-sloc.py index f73cf860..b1ef0811 100644 --- a/kibble/scanners/scanners/git-sloc.py +++ b/kibble/scanners/scanners/git-sloc.py @@ -19,7 +19,7 @@ import subprocess import time -from kibble.scanners.utils import sloc, git +from kibble.scanners.utils import git, sloc """ Source Lines of Code counter for Git """ diff --git a/kibble/scanners/scanners/github-issues.py b/kibble/scanners/scanners/github-issues.py index 1b5f0bbb..c5edc6eb 100644 --- a/kibble/scanners/scanners/github-issues.py +++ b/kibble/scanners/scanners/github-issues.py @@ -15,11 +15,12 @@ # specific language governing permissions and limitations # under the License. -import re import hashlib -from dateutil import parser +import re import time + import requests +from dateutil import parser from kibble.scanners.utils import github diff --git a/kibble/scanners/scanners/jenkins.py b/kibble/scanners/scanners/jenkins.py index e9833de0..0f7db895 100644 --- a/kibble/scanners/scanners/jenkins.py +++ b/kibble/scanners/scanners/jenkins.py @@ -15,11 +15,11 @@ # specific language governing permissions and limitations # under the License. -import time import datetime -import re import hashlib +import re import threading +import time import urllib.parse from kibble.scanners.utils import jsonapi diff --git a/kibble/scanners/scanners/jira.py b/kibble/scanners/scanners/jira.py index 410f8d50..f10b3fff 100644 --- a/kibble/scanners/scanners/jira.py +++ b/kibble/scanners/scanners/jira.py @@ -15,10 +15,11 @@ # specific language governing permissions and limitations # under the License. -import time -import re import hashlib +import re import threading +import time + import requests.exceptions from kibble.scanners.utils import jsonapi diff --git a/kibble/scanners/scanners/pipermail.py b/kibble/scanners/scanners/pipermail.py index d8b33dd2..59c96ed9 100644 --- a/kibble/scanners/scanners/pipermail.py +++ b/kibble/scanners/scanners/pipermail.py @@ -15,15 +15,15 @@ # specific language governing permissions and limitations # under the License. 
-import mailbox +import datetime import email.errors -import email.utils import email.header -import time -import re -import os +import email.utils import hashlib -import datetime +import mailbox +import os +import re +import time from kibble.scanners.utils import urlmisc diff --git a/kibble/scanners/scanners/ponymail-kpe.py b/kibble/scanners/scanners/ponymail-kpe.py index b5b40072..e2a41290 100644 --- a/kibble/scanners/scanners/ponymail-kpe.py +++ b/kibble/scanners/scanners/ponymail-kpe.py @@ -15,8 +15,8 @@ # specific language governing permissions and limitations # under the License. -import time import re +import time from kibble.scanners.utils import jsonapi, kpe diff --git a/kibble/scanners/scanners/ponymail-tone.py b/kibble/scanners/scanners/ponymail-tone.py index c616a475..22076de5 100644 --- a/kibble/scanners/scanners/ponymail-tone.py +++ b/kibble/scanners/scanners/ponymail-tone.py @@ -18,8 +18,8 @@ """ This is a Kibble scanner plugin for Apache Pony Mail sources. """ -import time import re +import time from kibble.scanners.utils import jsonapi, tone diff --git a/kibble/scanners/scanners/ponymail.py b/kibble/scanners/scanners/ponymail.py index 89e15723..1744ea15 100644 --- a/kibble/scanners/scanners/ponymail.py +++ b/kibble/scanners/scanners/ponymail.py @@ -15,10 +15,10 @@ # specific language governing permissions and limitations # under the License. -import time -import re -import hashlib import datetime +import hashlib +import re +import time from kibble.scanners.utils import jsonapi diff --git a/kibble/scanners/scanners/travis.py b/kibble/scanners/scanners/travis.py index b8c92820..519cb400 100644 --- a/kibble/scanners/scanners/travis.py +++ b/kibble/scanners/scanners/travis.py @@ -15,11 +15,12 @@ # specific language governing permissions and limitations # under the License. -import time import datetime -import re import hashlib +import re import threading +import time + import requests import requests.exceptions diff --git a/kibble/scanners/scanners/twitter.py b/kibble/scanners/scanners/twitter.py index b274fe79..996c0487 100644 --- a/kibble/scanners/scanners/twitter.py +++ b/kibble/scanners/scanners/twitter.py @@ -18,8 +18,9 @@ """ This is a Kibble scanner plugin for Twitter sources. """ -import time import hashlib +import time + import twitter title = "Scanner plugin for Twitter" diff --git a/kibble/scanners/utils/git.py b/kibble/scanners/utils/git.py index bd712524..5ea7607c 100644 --- a/kibble/scanners/utils/git.py +++ b/kibble/scanners/utils/git.py @@ -18,9 +18,9 @@ """ This is the Kibble git utility plugin """ import os -import sys -import subprocess import re +import subprocess +import sys def defaultBranch(source, datapath, KibbleBit=None): diff --git a/kibble/scanners/utils/github.py b/kibble/scanners/utils/github.py index 95b30ea0..5284b1bf 100644 --- a/kibble/scanners/utils/github.py +++ b/kibble/scanners/utils/github.py @@ -17,9 +17,10 @@ """ GitHub utility library """ import re -import requests import time +import requests + repo_pattern = re.compile(".*[:/]([^/]+)/([^/]+).git") issues_api = "https://api.github.com/repos/%s/%s/issues" traffic_api = "https://api.github.com/repos/%s/%s/traffic" diff --git a/kibble/scanners/utils/jsonapi.py b/kibble/scanners/utils/jsonapi.py index 28fc4747..64e7a7c5 100644 --- a/kibble/scanners/utils/jsonapi.py +++ b/kibble/scanners/utils/jsonapi.py @@ -18,9 +18,10 @@ """ This is a Kibble JSON API plugin. 
""" -import requests -import time import base64 +import time + +import requests CONNECT_TIMEOUT = 2 # Max timeout for the connect part of a request. diff --git a/kibble/scanners/utils/kpe.py b/kibble/scanners/utils/kpe.py index 3589019f..c42e8e39 100644 --- a/kibble/scanners/utils/kpe.py +++ b/kibble/scanners/utils/kpe.py @@ -33,9 +33,10 @@ Currently only pony mail is supported. more to come. """ +import json import re + import requests -import json def trimBody(body): diff --git a/kibble/scanners/utils/sloc.py b/kibble/scanners/utils/sloc.py index 46ab9eda..850ab13e 100644 --- a/kibble/scanners/utils/sloc.py +++ b/kibble/scanners/utils/sloc.py @@ -17,9 +17,9 @@ """ This is the SLoC counter utility for Kibble """ -import subprocess -import re import multiprocessing +import re +import subprocess def count(path): diff --git a/kibble/scanners/utils/tone.py b/kibble/scanners/utils/tone.py index 87e945da..dab6e73c 100644 --- a/kibble/scanners/utils/tone.py +++ b/kibble/scanners/utils/tone.py @@ -28,9 +28,10 @@ Currently only pony mail is supported. more to come. """ -import requests import json +import requests + def watsonTone(KibbleBit, bodies): """ Sentiment analysis using IBM Watson """ diff --git a/kibble/scanners/utils/urlmisc.py b/kibble/scanners/utils/urlmisc.py index 8f655ba6..cf0ee696 100644 --- a/kibble/scanners/utils/urlmisc.py +++ b/kibble/scanners/utils/urlmisc.py @@ -19,11 +19,11 @@ This is a Kibble miscellaneous URL functions plugin. """ import base64 -import urllib.request import gzip -import tempfile import io import subprocess +import tempfile +import urllib.request def unzip(url, creds=None, cookie=None): diff --git a/kibble/setup/makeaccount.py b/kibble/setup/makeaccount.py index 50364859..2b4e954b 100644 --- a/kibble/setup/makeaccount.py +++ b/kibble/setup/makeaccount.py @@ -15,13 +15,16 @@ # specific language governing permissions and limitations # under the License. 
-import sys, os, os.path -import elasticsearch import argparse -import yaml +import os +import os.path +import sys + import bcrypt +import elasticsearch +import yaml -from kibble.settings import YAML_DIRECTORY, KIBBLE_YAML +from kibble.settings import KIBBLE_YAML, YAML_DIRECTORY class KibbleDatabase(object): From 1aa68040a70397b3aea7b26def912ad67d717b67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20S=C5=82owikowski?= Date: Fri, 27 Nov 2020 12:26:30 +0100 Subject: [PATCH 22/48] Remove unused variables (#98) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * removed few unused variables from code * bugzilla – removed unused variables from code * git-census – removed unused variable * buildbot – removed unused variable * pipermail – removed unused variable * ponymail – removed unused variable * discourse – removed unused variable * github-issues – removed unused variable * jenkins – removed unused variable * jira – removed unused variable * github-stats – removed unused variable * ponymail – removed unused variable * git-evolution – removed unused variable --- kibble/scanners/brokers/kibbleES.py | 1 - kibble/scanners/kibble-scanner.py | 3 +-- kibble/scanners/scanners/bugzilla.py | 7 ++----- kibble/scanners/scanners/buildbot.py | 9 +-------- kibble/scanners/scanners/discourse.py | 6 +----- kibble/scanners/scanners/git-census.py | 1 - kibble/scanners/scanners/git-evolution.py | 2 -- kibble/scanners/scanners/github-issues.py | 1 - kibble/scanners/scanners/github-stats.py | 2 -- kibble/scanners/scanners/jenkins.py | 4 +--- kibble/scanners/scanners/jira.py | 7 ++----- kibble/scanners/scanners/pipermail.py | 3 +-- kibble/scanners/scanners/ponymail-kpe.py | 2 +- kibble/scanners/scanners/ponymail-tone.py | 2 +- kibble/scanners/scanners/ponymail.py | 3 +-- kibble/scanners/scanners/travis.py | 14 +++----------- kibble/scanners/utils/git.py | 1 - kibble/scanners/utils/kpe.py | 1 - kibble/scanners/utils/tone.py | 1 - 19 files changed, 15 insertions(+), 55 deletions(-) diff --git a/kibble/scanners/brokers/kibbleES.py b/kibble/scanners/brokers/kibbleES.py index c061e307..5792dd8e 100644 --- a/kibble/scanners/brokers/kibbleES.py +++ b/kibble/scanners/brokers/kibbleES.py @@ -352,7 +352,6 @@ def __init__(self, config): def organisations(self): """ Return a list of all organisations """ - orgs = [] # Run the search, fetch all orgs, 9999 max. TODO: Scroll??? res = self.DB.search( diff --git a/kibble/scanners/kibble-scanner.py b/kibble/scanners/kibble-scanner.py index 52b2954c..390a2631 100644 --- a/kibble/scanners/kibble-scanner.py +++ b/kibble/scanners/kibble-scanner.py @@ -102,7 +102,6 @@ def run(self): global BIG_LOCK, PENDING_OBJECTS time.sleep(0.5) # Primarily to align printouts. 
# While there are objects to snag - a = 0 while PENDING_OBJECTS: BIG_LOCK.acquire(blocking=True) try: @@ -161,7 +160,7 @@ def main(): for source in org.sources(view=args.view): tooNew = False if "steps" in source: - for key, step in source["steps"].items(): + for _, step in source["steps"].items(): if "time" in step and step["time"] >= minAge: tooNew = True break diff --git a/kibble/scanners/scanners/bugzilla.py b/kibble/scanners/scanners/bugzilla.py index b0eb19d4..6b55e4fb 100644 --- a/kibble/scanners/scanners/bugzilla.py +++ b/kibble/scanners/scanners/bugzilla.py @@ -310,7 +310,7 @@ def run(self): self.block.acquire() try: rl = self.pendingTickets.pop(0) - except Exception as err: # list empty, likely + except Exception: # list empty, likely self.block.release() return if not rl: @@ -338,7 +338,6 @@ def run(self): def scan(KibbleBit, source): - path = source["sourceID"] url = source["sourceURL"] source["steps"]["issues"] = { @@ -358,7 +357,6 @@ def scan(KibbleBit, source): and len(source["creds"]["username"]) > 0 ): creds = "%s:%s" % (source["creds"]["username"], source["creds"]["password"]) - badOnes = 0 pendingTickets = [] openTickets = [] @@ -367,7 +365,6 @@ def scan(KibbleBit, source): dom = re.sub(r"/+$", "", dom) u = "%s/jsonrpc.cgi" % dom instance = bz.group(3) - lastTicket = 0 params = { "product": [instance], @@ -445,9 +442,9 @@ def scan(KibbleBit, source): % (len(openTickets), len(pendingTickets) - len(openTickets)) ) - badOnes = 0 block = Lock() threads = [] + # TODO: Fix this loop for i in range(0, 4): t = bzThread(KibbleBit, source, block, pendingTickets, openTickets, u, dom) threads.append(t) diff --git a/kibble/scanners/scanners/buildbot.py b/kibble/scanners/scanners/buildbot.py index e4f7b2d5..72561ff3 100644 --- a/kibble/scanners/scanners/buildbot.py +++ b/kibble/scanners/scanners/buildbot.py @@ -40,15 +40,12 @@ def accepts(source): def scanJob(KibbleBit, source, job, creds): """ Scans a single job for activity """ - NOW = int(datetime.datetime.utcnow().timestamp()) dhash = hashlib.sha224( ("%s-%s-%s" % (source["organisation"], source["sourceID"], job)).encode( "ascii", errors="replace" ) ).hexdigest() - found = True doc = None - parseIt = False found = KibbleBit.exists("cijob", dhash) jobURL = "%s/json/builders/%s/builds/_all" % (source["sourceURL"], job) @@ -140,7 +137,7 @@ def run(self): self.block.acquire() try: job = self.jobs.pop(0) - except Exception as err: + except Exception: self.block.release() return if not job: @@ -178,8 +175,6 @@ def scan(KibbleBit, source): } KibbleBit.updateSource(source) - badOnes = 0 - pendingJobs = [] KibbleBit.pprint("Parsing Buildbot activity at %s" % source["sourceURL"]) source["steps"]["ci"] = { "time": time.time(), @@ -216,8 +211,6 @@ def scan(KibbleBit, source): # Scan queue items blocked = 0 stuck = 0 - totalqueuetime = 0 - labelQueuedBuilds = {} queueSize = 0 actualQueueSize = 0 building = 0 diff --git a/kibble/scanners/scanners/discourse.py b/kibble/scanners/scanners/discourse.py index eeb15de7..ee99dde6 100644 --- a/kibble/scanners/scanners/discourse.py +++ b/kibble/scanners/scanners/discourse.py @@ -41,10 +41,7 @@ def accepts(source): def scanJob(KibbleBit, source, cat, creds): """ Scans a single discourse category for activity """ - NOW = int(datetime.datetime.utcnow().timestamp()) - # Get $discourseURL/c/$catID - catURL = os.path.join(source["sourceURL"], "c/%s" % cat["id"]) KibbleBit.pprint("Scanning Discourse category '%s' at %s" % (cat["slug"], catURL)) @@ -249,7 +246,7 @@ def run(self): self.block.acquire() try: job = 
self.jobs.pop(0) - except Exception as err: + except Exception: self.block.release() return if not job: @@ -289,7 +286,6 @@ def scan(KibbleBit, source): } KibbleBit.updateSource(source) - badOnes = 0 pendingJobs = [] KibbleBit.pprint("Parsing Discourse activity at %s" % source["sourceURL"]) source["steps"]["forum"] = { diff --git a/kibble/scanners/scanners/git-census.py b/kibble/scanners/scanners/git-census.py index 16ac9cc9..33a0920a 100644 --- a/kibble/scanners/scanners/git-census.py +++ b/kibble/scanners/scanners/git-census.py @@ -92,7 +92,6 @@ def scan(KibbleBit, source): inp = f.read() f.close() os.unlink(tmp.name) - edone = 0 KibbleBit.pprint("Parsing log for %s (%s)..." % (rid, url)) for m in re.finditer( u":([a-f0-9]+)\|([^\r\n|]+)\|([^\r\n|]+)\|([^\r\n|]+)\|([^\r\n|]+)\|([\d+]+)\r?\n([^:]+?):", diff --git a/kibble/scanners/scanners/git-evolution.py b/kibble/scanners/scanners/git-evolution.py index 6c842436..f391e421 100644 --- a/kibble/scanners/scanners/git-evolution.py +++ b/kibble/scanners/scanners/git-evolution.py @@ -118,7 +118,6 @@ def find_branch(date, gpath): return "master" except: os.chdir(gpath) - branch = "" try: return ( subprocess.check_output( @@ -138,7 +137,6 @@ def find_branch(date, gpath): def scan(KibbleBit, source): rid = source["sourceID"] - url = source["sourceURL"] rootpath = "%s/%s/git" % ( KibbleBit.config["scanner"]["scratchdir"], source["organisation"], diff --git a/kibble/scanners/scanners/github-issues.py b/kibble/scanners/scanners/github-issues.py index c5edc6eb..06bbde67 100644 --- a/kibble/scanners/scanners/github-issues.py +++ b/kibble/scanners/scanners/github-issues.py @@ -196,7 +196,6 @@ def scan(KibbleBit, source, firstAttempt=True): doc = make_issue(source, issue, people) dhash = doc["id"] - stored_change = None if KibbleBit.exists("issue", dhash): es_doc = KibbleBit.get("issue", dhash) if not status_changed(es_doc, doc): diff --git a/kibble/scanners/scanners/github-stats.py b/kibble/scanners/scanners/github-stats.py index 5f993dc4..504d1516 100644 --- a/kibble/scanners/scanners/github-stats.py +++ b/kibble/scanners/scanners/github-stats.py @@ -42,11 +42,9 @@ def getTime(string): def scan(KibbleBit, source): # Get some vars, construct a data path for the repo - path = source["sourceID"] url = source["sourceURL"] auth = None - people = {} if "creds" in source: KibbleBit.pprint("Using auth for repo %s" % source["sourceURL"]) creds = source["creds"] diff --git a/kibble/scanners/scanners/jenkins.py b/kibble/scanners/scanners/jenkins.py index 0f7db895..f7ec495e 100644 --- a/kibble/scanners/scanners/jenkins.py +++ b/kibble/scanners/scanners/jenkins.py @@ -50,9 +50,7 @@ def scanJob(KibbleBit, source, job, creds): "ascii", errors="replace" ) ).hexdigest() - found = True doc = None - parseIt = False found = KibbleBit.exists("cijob", dhash) # Get $jenkins/job/$job-name/json... 
@@ -156,6 +154,7 @@ def run(self):
         try:
             job = self.jobs.pop(0)
         except Exception as err:
+            print(f"An error occurred: {err}")
             self.block.release()
             return
         if not job:
@@ -199,7 +198,6 @@ def scan(KibbleBit, source):
         }
         KibbleBit.updateSource(source)

-        badOnes = 0
         pendingJobs = []
         KibbleBit.pprint("Parsing Jenkins activity at %s" % source["sourceURL"])
         source["steps"]["issues"] = {
diff --git a/kibble/scanners/scanners/jira.py b/kibble/scanners/scanners/jira.py
index f10b3fff..efc22cf4 100644
--- a/kibble/scanners/scanners/jira.py
+++ b/kibble/scanners/scanners/jira.py
@@ -130,7 +130,6 @@ def scanTicket(KibbleBit, key, u, source, creds, openTickets):
         )
     ).hexdigest()
     found = True
-    doc = None
     parseIt = False

     # the 'domain' var we try to figure out here is used
@@ -179,7 +178,7 @@
             KibbleBit.pprint("%s does not exist (404'ed)" % key)
             return False
         except requests.exceptions.ConnectionError as err:
-            KibbleBit.pprint("Connection error, skipping this ticket for now!")
+            KibbleBit.pprint(f"Connection error: {err}, skipping this ticket for now!")
             return False
     st, closer = wasclosed(tjson)
     if st and not closer:
@@ -323,6 +322,7 @@ def run(self):
         try:
             rl = self.pendingTickets.pop(0)
         except Exception as err:
+            print(f"An error occurred: {err}")
             self.block.release()
             return
         if not rl:
@@ -384,9 +384,6 @@ def scan(KibbleBit, source):
     }
     KibbleBit.updateSource(source)

-    badOnes = 0
-    jsa = []
-    jsp = []
     pendingTickets = []
     KibbleBit.pprint("Parsing JIRA activity at %s" % source["sourceURL"])
     source["steps"]["issues"] = {
diff --git a/kibble/scanners/scanners/pipermail.py b/kibble/scanners/scanners/pipermail.py
index 59c96ed9..e361ad2e 100644
--- a/kibble/scanners/scanners/pipermail.py
+++ b/kibble/scanners/scanners/pipermail.py
@@ -49,8 +49,7 @@ def scan(KibbleBit, source):
     if pipermail:
         KibbleBit.pprint("Scanning Pipermail source %s" % url)
         skipped = 0
-        jsa = []
-        jsp = []
+
         source["steps"]["mail"] = {
             "time": time.time(),
             "status": "Downloading Pipermail statistics",
diff --git a/kibble/scanners/scanners/ponymail-kpe.py b/kibble/scanners/scanners/ponymail-kpe.py
index e2a41290..7e93320e 100644
--- a/kibble/scanners/scanners/ponymail-kpe.py
+++ b/kibble/scanners/scanners/ponymail-kpe.py
@@ -101,7 +101,7 @@ def scan(KibbleBit, source):
                 if rv and "body" in rv:
                     hits.append([hit["_id"], rv["body"], eml])
             except Exception as err:
-                KibbleBit.pprint("Server error, skipping this email")
+                KibbleBit.pprint(f"Server error: {err}, skipping this email")

     bodies = []
     for hit in hits:
diff --git a/kibble/scanners/scanners/ponymail-tone.py b/kibble/scanners/scanners/ponymail-tone.py
index 22076de5..e5cc02ba 100644
--- a/kibble/scanners/scanners/ponymail-tone.py
+++ b/kibble/scanners/scanners/ponymail-tone.py
@@ -102,7 +102,7 @@ def scan(KibbleBit, source):
                 if rv and "body" in rv:
                     hits.append([hit["_id"], rv["body"], eml])
             except Exception as err:
-                KibbleBit.pprint("Server error, skipping this email")
+                KibbleBit.pprint(f"Server error: {err}, skipping this email")

     bodies = []
     for hit in hits:
diff --git a/kibble/scanners/scanners/ponymail.py b/kibble/scanners/scanners/ponymail.py
index 1744ea15..78f0a4ae 100644
--- a/kibble/scanners/scanners/ponymail.py
+++ b/kibble/scanners/scanners/ponymail.py
@@ -69,7 +69,6 @@ def repliedTo(emails, struct):

 def getSender(email):
     sender = email["from"]
-    name = sender
     m = re.match(r"(.+)\s*<(.+)>", email["from"], flags=re.UNICODE)
     if m:
         name = m.group(1).replace('"', "").strip()
@@ -164,7 +163,7 @@ def scan(KibbleBit, source):
         try:
             js = jsonapi.get(statsurl, cookie=cookie)
         except Exception as err:
-            KibbleBit.pprint("Server error, skipping this month")
+            KibbleBit.pprint(f"Server error: {err}, skipping this month")
             month -= 1
             if month <= 0:
                 month += 12
diff --git a/kibble/scanners/scanners/travis.py b/kibble/scanners/scanners/travis.py
index 519cb400..d76ca571 100644
--- a/kibble/scanners/scanners/travis.py
+++ b/kibble/scanners/scanners/travis.py
@@ -41,16 +41,12 @@ def accepts(source):

 def scanJob(KibbleBit, source, bid, token, TLD):
     """ Scans a single job for activity """
-    NOW = int(datetime.datetime.utcnow().timestamp())
+    # NOW = int(datetime.datetime.utcnow().timestamp())
     dhash = hashlib.sha224(
         ("%s-%s-%s" % (source["organisation"], source["sourceURL"], bid)).encode(
             "ascii", errors="replace"
         )
     ).hexdigest()
-    found = True
-    doc = None
-    parseIt = False
-    found = KibbleBit.exists("cijob", dhash)

     # Get the job data
     pages = 0
@@ -192,7 +188,7 @@ def run(self):
         self.block.acquire()
         try:
             job = self.jobs.pop(0)
-        except Exception as err:
+        except Exception:
             self.block.release()
             return
         if not job:
@@ -217,7 +213,7 @@ def run(self):

 badOnes = 0

 def scan(KibbleBit, source):
     # Simple URL check
     travis = re.match(r"https?://travis-ci\.(org|com)", source["sourceURL"])
     if travis:
@@ -231,7 +227,6 @@
         }
         KibbleBit.updateSource(source)

-        badOnes = 0
         pendingJobs = []
         KibbleBit.pprint("Parsing Travis activity at %s" % source["sourceURL"])
         source["steps"]["travis"] = {
@@ -255,9 +250,6 @@
             KibbleBit.pprint("Travis CI requires a token to work!")
             return False

-        # Get the job list, paginated
-        sURL = source["sourceURL"]
-
         # Used for pagination
         jobs = 100
         offset = 0
diff --git a/kibble/scanners/utils/git.py b/kibble/scanners/utils/git.py
index 5ea7607c..c1f3cd7f 100644
--- a/kibble/scanners/utils/git.py
+++ b/kibble/scanners/utils/git.py
@@ -32,7 +32,6 @@ def defaultBranch(source, datapath, KibbleBit=None):
         wanted_branches = KibbleBit.config["git"].get(
             "wanted_branches", wanted_branches
         )
-    foundBranch = False

     # For each wanted branch, in order, look for it in our clone,
     # and return the name if found.
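The deletions in this patch all target the same two lint findings: local variables that are assigned but never read (flake8/pyflakes flag these as F841) and `except` clauses that bind an exception object they never use. A hypothetical before-and-after sketch of the pattern; the names are invented for illustration:

    def scan(KibbleBit, source):
        # badOnes = 0  <- assigned but never read (F841), so the patch deletes it
        url = source["sourceURL"]
        try:
            KibbleBit.pprint("Scanning %s" % url)
        except Exception:  # was 'except Exception as err:' with err unused
            return False
        return True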
diff --git a/kibble/scanners/utils/kpe.py b/kibble/scanners/utils/kpe.py index c42e8e39..7eae14b1 100644 --- a/kibble/scanners/utils/kpe.py +++ b/kibble/scanners/utils/kpe.py @@ -77,7 +77,6 @@ def azureKPE(KibbleBit, bodies): KPEs = [] for body in bodies: # Crop out quotes - lines = body.split("\n") body = trimBody(body) doc = {"language": "en", "id": str(a), "text": body} js["documents"].append(doc) diff --git a/kibble/scanners/utils/tone.py b/kibble/scanners/utils/tone.py index dab6e73c..3571c89b 100644 --- a/kibble/scanners/utils/tone.py +++ b/kibble/scanners/utils/tone.py @@ -167,7 +167,6 @@ def picoTone(KibbleBit, bodies): mood = {} # Sentiment is the overall score, and we use that for the neutrality of a text - val = (1 + doc["sentiment"]) / 2 mood["negative"] = doc[ "negativity" From 393635e18fcabe75e0cea72005b1f168c0769230 Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Fri, 27 Nov 2020 13:18:01 +0000 Subject: [PATCH 23/48] Refactor make_account as cli command (#94) --- kibble.ini | 2 +- kibble/__main__.py | 30 +++++-- kibble/cli/make_account_command.py | 79 +++++++++++++++++ kibble/cli/setup_command.py | 17 ++-- kibble/{setup => mappings}/mappings.json | 0 kibble/settings.py | 3 + kibble/setup/kibble.yaml.sample | 19 ----- kibble/setup/makeaccount.py | 104 ----------------------- 8 files changed, 114 insertions(+), 140 deletions(-) create mode 100644 kibble/cli/make_account_command.py rename kibble/{setup => mappings}/mappings.json (100%) delete mode 100644 kibble/setup/kibble.yaml.sample delete mode 100644 kibble/setup/makeaccount.py diff --git a/kibble.ini b/kibble.ini index 0089f4d8..527d574b 100644 --- a/kibble.ini +++ b/kibble.ini @@ -12,7 +12,7 @@ version = 0.1.0 # Elasticsearch database name dbname = kibble # Connection uri used to determine host and port of elasticsearch instance -conn_uri = elasticsearch:9200 +conn_uri = http://elasticsearch:9200 # Number of shards in es cluster shards = 5 # Number of replicase in es cluster diff --git a/kibble/__main__.py b/kibble/__main__.py index 2807c35b..4b1c6b56 100644 --- a/kibble/__main__.py +++ b/kibble/__main__.py @@ -18,6 +18,7 @@ import click from kibble.cli import setup_command +from kibble.cli.make_account_command import make_account_cmd from kibble.configuration import conf from kibble.version import version as kibble_version @@ -29,7 +30,7 @@ def cli(): @cli.command("version", short_help="displays the current kibble version") def version(): - click.echo(kibble_version) + print(kibble_version) @cli.command("setup", short_help="starts the setup process for kibble") @@ -57,9 +58,6 @@ def version(): default=conf.get("elasticsearch", "replicas"), help="number of replicas for ES", ) -@click.option( - "-m", "--mailhost", default=conf.get("mail", "mailhost"), help="mail server host" -) @click.option("-a", "--autoadmin", default=False, help="generate generic admin account") @click.option("-k", "--skiponexist", default=True, help="skip DB creation if DBs exist") def setup( @@ -67,7 +65,6 @@ def setup( dbname: str, shards: str, replicas: str, - mailhost: str, autoadmin: bool, skiponexist: bool, ): @@ -76,12 +73,33 @@ def setup( dbname=dbname, shards=shards, replicas=replicas, - mailhost=mailhost, autoadmin=autoadmin, skiponexist=skiponexist, ) +@cli.command("make_account", short_help="creates new kibble user account") +@click.option( + "-u", "--username", help="username (email) of account to create", required=True +) +@click.option("-p", "--password", help="password to set for account", required=True) +@click.option("-A", 
"--admin", default=False, help="make account global admin") +@click.option( + "-a", "--orgadmin", default=False, help="make account owner of orgs invited to" +) +@click.option("-o", "--org", default=None, help="invite to this organisation") +def make_account( + username: str, + password: str, + admin: bool = False, + orgadmin: bool = False, + org: str = None, +): + make_account_cmd( + username=username, password=password, admin=admin, adminorg=orgadmin, org=org + ) + + def main(): cli() diff --git a/kibble/cli/make_account_command.py b/kibble/cli/make_account_command.py new file mode 100644 index 00000000..e0a59f68 --- /dev/null +++ b/kibble/cli/make_account_command.py @@ -0,0 +1,79 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from urllib.parse import urlparse + +import bcrypt +import elasticsearch + +from kibble.configuration import conf + + +class ESDatabase: + def __init__(self): + self.dbname = conf.get("elasticsearch", "dbname") + parsed = urlparse(conf.get("elasticsearch", "conn_uri")) + es_host = { + "host": parsed.hostname, + "port": parsed.port, + "use_ssl": conf.getboolean("elasticsearch", "ssl"), + "verify_certs": False, + "url_prefix": conf.get("elasticsearch", "uri"), + "http_auth": conf.get("elasticsearch", "auth") or None, + } + self.es = elasticsearch.Elasticsearch( + hosts=[es_host], max_retries=5, retry_on_timeout=True + ) + + def create_index(self, doc_type: str, id_: str, body: dict): + self.es.index(index=self.dbname, doc_type=doc_type, id=id_, body=body) + + +def make_account_cmd( + username: str, + password: str, + admin: bool = False, + adminorg: bool = False, + org: str = None, +) -> None: + """ + Create user kibble account. + + :param username: username for login for example email + :param password: password used for login + :param admin: set to true if created user should has admin access level + :param adminorg: organization user owns + :param org: organisation user belongs to + """ + orgs = [org] or [] + aorgs = [adminorg] if adminorg else [] + + salt = bcrypt.gensalt() + pwd = bcrypt.hashpw(password.encode("utf-8"), salt).decode("ascii") + doc = { + "email": username, + "password": pwd, + "displayName": username, + "organisations": orgs, + "ownerships": aorgs, + "defaultOrganisation": None, # Default org for user + "verified": True, # Account verified via email? 
+ "userlevel": "admin" if admin else "user", + } + db = ESDatabase() + db.create_index(doc_type="useraccount", id_=username, body=doc) + print("Account created!") diff --git a/kibble/cli/setup_command.py b/kibble/cli/setup_command.py index a88b7abe..264e7ea3 100644 --- a/kibble/cli/setup_command.py +++ b/kibble/cli/setup_command.py @@ -22,11 +22,11 @@ from getpass import getpass import bcrypt -import click import tenacity from elasticsearch import Elasticsearch from kibble.configuration import conf +from kibble.settings import MAPPING_DIRECTORY KIBBLE_VERSION = conf.get("api", "version") KIBBLE_DB_VERSION = conf.get("api", "database") @@ -53,9 +53,7 @@ def create_es_index( # elasticsearch logs lots of warnings on retries/connection failure logging.getLogger("elasticsearch").setLevel(logging.ERROR) - mappings_json = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "../setup/mappings.json" - ) + mappings_json = os.path.join(MAPPING_DIRECTORY, "mappings.json") with open(mappings_json, "r") as f: mappings = json.load(f) @@ -170,11 +168,10 @@ def do_setup( dbname: str, shards: str, replicas: str, - mailhost: str, autoadmin: bool, skiponexist: bool, ): - click.echo("Welcome to the Apache Kibble setup script!") + print("Welcome to the Apache Kibble setup script!") admin_name = "admin@kibble" admin_pass = "kibbleAdmin" @@ -188,7 +185,7 @@ def do_setup( # Create Elasticsearch index # Retry in case ES is not yet up - click.echo(f"Elasticsearch: {uri}") + print(f"Elasticsearch: {uri}") for attempt in tenacity.Retrying( retry=tenacity.retry_if_exception_type(exception_types=Exception), wait=tenacity.wait_fixed(10), @@ -196,7 +193,7 @@ def do_setup( reraise=True, ): with attempt: - click.echo("Trying to create ES index...") + print("Trying to create ES index...") create_es_index( conn_uri=uri, dbname=dbname, @@ -206,5 +203,5 @@ def do_setup( admin_pass=admin_pass, skiponexist=skiponexist, ) - click.echo() - click.echo("All done, Kibble should...work now :)") + print() + print("All done, Kibble should...work now :)") diff --git a/kibble/setup/mappings.json b/kibble/mappings/mappings.json similarity index 100% rename from kibble/setup/mappings.json rename to kibble/mappings/mappings.json diff --git a/kibble/settings.py b/kibble/settings.py index 243c3011..d165bb21 100644 --- a/kibble/settings.py +++ b/kibble/settings.py @@ -21,3 +21,6 @@ os.path.dirname(os.path.realpath(__file__)), "api", "yaml" ) KIBBLE_YAML = os.path.join(YAML_DIRECTORY, "kibble.yaml") +MAPPING_DIRECTORY = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "mappings" +) diff --git a/kibble/setup/kibble.yaml.sample b/kibble/setup/kibble.yaml.sample deleted file mode 100644 index c414b5ed..00000000 --- a/kibble/setup/kibble.yaml.sample +++ /dev/null @@ -1,19 +0,0 @@ -elasticsearch: - host: localhost - port: 9200 - ssl: false - dbname: kibble - -mail: - mailhost: localhost - mailport: 25 - sender: Kibble - -accounts: - allowSignup: true - verifyEmail: false - # Example auto-invite setup: - autoInvite: - - - domain: apache.org - organisation: apache diff --git a/kibble/setup/makeaccount.py b/kibble/setup/makeaccount.py deleted file mode 100644 index 2b4e954b..00000000 --- a/kibble/setup/makeaccount.py +++ /dev/null @@ -1,104 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import argparse -import os -import os.path -import sys - -import bcrypt -import elasticsearch -import yaml - -from kibble.settings import KIBBLE_YAML, YAML_DIRECTORY - - -class KibbleDatabase(object): - def __init__(self, config): - self.config = config - self.dbname = config["elasticsearch"]["dbname"] - self.ES = elasticsearch.Elasticsearch( - [ - { - "host": config["elasticsearch"]["host"], - "port": int(config["elasticsearch"]["port"]), - "use_ssl": config["elasticsearch"]["ssl"], - "verify_certs": False, - "url_prefix": config["elasticsearch"]["uri"] - if "uri" in config["elasticsearch"] - else "", - "http_auth": config["elasticsearch"]["auth"] - if "auth" in config["elasticsearch"] - else None, - } - ], - max_retries=5, - retry_on_timeout=True, - ) - - -arg_parser = argparse.ArgumentParser() -arg_parser.add_argument( - "-u", "--username", required=True, help="Username (email) of accoun to create" -) -arg_parser.add_argument( - "-p", "--password", required=True, help="Password to set for account" -) -arg_parser.add_argument( - "-n", "--name", help="Real name (displayname) of account (optional)" -) -arg_parser.add_argument( - "-A", "--admin", action="store_true", help="Make account global admin" -) -arg_parser.add_argument( - "-a", - "--orgadmin", - action="store_true", - help="Make account owner of orgs invited to", -) -arg_parser.add_argument("-o", "--org", help="Invite to this organisation") - -args = arg_parser.parse_args() - -# Load Kibble master configuration -with open(KIBBLE_YAML) as f: - config = yaml.safe_load(f) - -DB = KibbleDatabase(config) - -username = args.username -password = args.password -name = args.name if args.name else args.username -admin = True if args.admin else False -adminorg = True if args.orgadmin else False -orgs = [args.org] if args.org else [] -aorgs = orgs if adminorg else [] - -salt = bcrypt.gensalt() -pwd = bcrypt.hashpw(password.encode("utf-8"), salt).decode("ascii") -doc = { - "email": username, # Username (email) - "password": pwd, # Hashed password - "displayName": username, # Display Name - "organisations": orgs, # Orgs user belongs to (default is none) - "ownerships": aorgs, # Orgs user owns (default is none) - "defaultOrganisation": None, # Default org for user - "verified": True, # Account verified via email? 
- "userlevel": "admin" if admin else "user", # User level (user/admin) -} -DB.ES.index(index=DB.dbname, doc_type="useraccount", id=username, body=doc) -print("Account created!") From 5228a4a65bc069035db76b6b4ae9d322d45550a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20S=C5=82owikowski?= Date: Fri, 27 Nov 2020 15:58:48 +0100 Subject: [PATCH 24/48] fix doc typo (#99) --- docs/source/setup.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/setup.rst b/docs/source/setup.rst index 868dc92a..67bffd7a 100644 --- a/docs/source/setup.rst +++ b/docs/source/setup.rst @@ -37,7 +37,7 @@ The Kibble Server (kibble) The Kibble Scanner Applications (kibble-scanners) This is a collection of scanning applications each designed to work with a specific type of resource (a git repo, a mailing list, a JIRA - instance etc) and push copmpiled data objects to the Kibble Server. + instance etc) and push compiled data objects to the Kibble Server. Some resources only have one scanner plugin, while others may have multiple plugins capable of dealing with specific aspects of a resource. From e4377906ea344a89324692c94dbe06982bef109c Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sat, 12 Dec 2020 21:13:15 +0000 Subject: [PATCH 25/48] Fix broken Kibble (#106) Before this commit I got this error when running Kibble locally: This was because https://github.com/apache/kibble/pull/83 replaced kibble.yaml with kibble.ini This PR reads the config from `kibble.ini`. This also adds a network to docker-compose file without which I was getting host not found error. --- docker-compose-dev.yaml | 17 +++++++++++++++++ kibble/api/handler.py | 13 ++++--------- kibble/api/plugins/database.py | 23 +++++++---------------- 3 files changed, 28 insertions(+), 25 deletions(-) diff --git a/docker-compose-dev.yaml b/docker-compose-dev.yaml index 3e67c369..d4d8b887 100644 --- a/docker-compose-dev.yaml +++ b/docker-compose-dev.yaml @@ -1,5 +1,9 @@ version: '3' +networks: + kibble: + driver: bridge + services: # Helper service to setup the Apache Kibble es node setup: @@ -12,6 +16,8 @@ services: - .:/kibble/ depends_on: - elasticsearch + networks: + - kibble # Apache Kibble API server kibble: @@ -25,6 +31,8 @@ services: - .:/kibble/ depends_on: - elasticsearch + networks: + - kibble # Apache Kibble web ui server ui: @@ -36,6 +44,8 @@ services: - 8000:8000 depends_on: - kibble + networks: + - kibble # Elasticsearch node required as a database for Apache Kibble elasticsearch: @@ -51,6 +61,8 @@ services: ES_JAVA_OPTS: -Xms256m -Xmx256m volumes: - "kibble-es-data:/usr/share/elasticsearch/data" + networks: + - kibble # Kibana to view and manage Elasticsearch kibana: @@ -59,6 +71,11 @@ services: - 5601:5601 depends_on: - elasticsearch + environment: + ELASTICSEARCH_URL: http://elasticsearch:9200 + ELASTICSEARCH_HOSTS: http://elasticsearch:9200 + networks: + - kibble volumes: # named volumes can be managed easier using docker-compose diff --git a/kibble/api/handler.py b/kibble/api/handler.py index d409b54d..8384d183 100644 --- a/kibble/api/handler.py +++ b/kibble/api/handler.py @@ -27,12 +27,11 @@ import sys import traceback -import yaml - from kibble.api.plugins import openapi from kibble.api.plugins.database import KibbleDatabase from kibble.api.plugins.session import KibbleSession -from kibble.settings import KIBBLE_YAML, YAML_DIRECTORY +from kibble.configuration import conf +from kibble.settings import YAML_DIRECTORY # Compile valid API URLs from the pages library # Allow backwards compatibility by also 
accepting .lua URLs @@ -45,10 +44,6 @@ urls.append((r"^(/api/%s)(/.+)?$" % page, handler.run)) -# Load Kibble master configuration -with open(KIBBLE_YAML, "r") as f: - config = yaml.safe_load(f) - # Instantiate database connections DB = None @@ -156,13 +151,13 @@ def application(environ, start_response): Checks against the pages library, and if submod found, runs it and returns the output. """ - db = KibbleDatabase(config) + db = KibbleDatabase(conf) path = environ.get("PATH_INFO", "") for regex, function in urls: m = re.match(regex, path) if m: callback = KibbleAPIWrapper(path, function) - session = KibbleSession(db, environ, config) + session = KibbleSession(db, environ, conf) a = 0 for bucket in callback(environ, start_response, session): if a == 0: diff --git a/kibble/api/plugins/database.py b/kibble/api/plugins/database.py index fb1a9fab..57467213 100644 --- a/kibble/api/plugins/database.py +++ b/kibble/api/plugins/database.py @@ -22,6 +22,8 @@ import elasticsearch +from kibble.configuration import KibbleConfigParser + class KibbleESWrapper(object): """ @@ -119,24 +121,13 @@ def count(self, index, doc_type="*", body=None): class KibbleDatabase(object): - def __init__(self, config): + def __init__(self, config: KibbleConfigParser): self.config = config - self.dbname = config["elasticsearch"]["dbname"] + self.dbname = config.get("elasticsearch", "dbname") self.ES = elasticsearch.Elasticsearch( - [ - { - "host": config["elasticsearch"]["host"], - "port": int(config["elasticsearch"]["port"]), - "use_ssl": config["elasticsearch"]["ssl"], - "verify_certs": False, - "url_prefix": config["elasticsearch"]["uri"] - if "uri" in config["elasticsearch"] - else "", - "http_auth": config["elasticsearch"]["auth"] - if "auth" in config["elasticsearch"] - else None, - } - ], + [config.get("elasticsearch", "conn_uri")], + use_ssl=config.getboolean("elasticsearch", "ssl"), + verify_certs=False, max_retries=5, retry_on_timeout=True, ) From 275c55f8173c9d19c6f155ff2240f4bba629a270 Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 02:07:05 +0000 Subject: [PATCH 26/48] Update pre-commits hook from 2.3 to 3.0 (#101) 3.3 is out for https://github.com/pre-commit/pre-commit-hooks --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c80ddd9f..e471779c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,7 @@ minimum_pre_commit_version: "1.20.0" repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v2.3.0 + rev: v3.3.0 hooks: - id: check-yaml - id: end-of-file-fixer From 36e2cf748c84957b57a9612e794f90271d6dbe8f Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 02:07:17 +0000 Subject: [PATCH 27/48] Fix asf.yaml INFRA link (#102) Replace old link with new link --- .asf.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.asf.yaml b/.asf.yaml index 98f624ad..0e43aed5 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. 
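Stepping back from the housekeeping diffs for a moment: the change above means the database plugin now reads one `conn_uri` string from `kibble.ini` instead of assembling host, port, and SSL fields from YAML. Assuming `KibbleConfigParser` behaves like the standard library's `ConfigParser`, the lookup pattern boils down to this sketch (sample values only):

```
from configparser import ConfigParser

conf = ConfigParser()
conf.read_string("""
[elasticsearch]
conn_uri = http://elasticsearch:9200
ssl = false
dbname = kibble
""")

# The same accessors KibbleDatabase now uses:
print(conf.get("elasticsearch", "conn_uri"))    # http://elasticsearch:9200
print(conf.getboolean("elasticsearch", "ssl"))  # False
```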
-# https://cwiki.apache.org/confluence/display/INFRA/.asf.yaml+features+for+git+repositories +# https://cwiki.apache.org/confluence/display/INFRA/git+-+.asf.yaml+features --- github: description: "Apache Kibble - a tool to collect, aggregate and visualize data about any software project" From d285764348e588847844e3bf353e4af7d639c38c Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 02:07:33 +0000 Subject: [PATCH 28/48] Replace mention of 'master' branch with 'main' (#103) `master` branch was renamed with `main`. This should reflect in doc. --- docs/source/setup.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/setup.rst b/docs/source/setup.rst index 67bffd7a..04c7ab39 100644 --- a/docs/source/setup.rst +++ b/docs/source/setup.rst @@ -100,7 +100,7 @@ Source Code Location *Apache Kibble does not currently have any releases.* *You are however welcome to try out the development version.* -For the time being, we recommend that you use the ``master`` branch for +For the time being, we recommend that you use the ``main`` branch for testing Kibble. This applies to both scanners and the server. The Kibble Server can be found via our source repository at From e07e6019802d82a15373c100bfb6e87af0f9b0f6 Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 02:07:51 +0000 Subject: [PATCH 29/48] Use https URL for Kibble in setup.py (#104) Since https://kibble.apache.org/ is available we should use that instead of http --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 6262a087..044da496 100644 --- a/setup.py +++ b/setup.py @@ -88,7 +88,7 @@ def do_setup(): ], author="Apache Software Foundation", author_email="dev@kibble.apache.org", - url="http://kibble.apache.org/", + url="https://kibble.apache.org/", download_url=f"https://archive.apache.org/dist/kibble/{version}", test_suite="setup.kibble_test_suite", python_requires="~=3.8", From 076bbe473ac7cc959b2c103e2d5a57b3ad03d872 Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 02:08:14 +0000 Subject: [PATCH 30/48] Fix various typos (#105) `Kible` -> `Kibble` `milis` -> `milliseconds` `offlined` -> `offline` `annalyze` -> `analyze` `Verrifying` -> `Verifying` `betwen` -> `between` `fechthing` -> `fetching` --- kibble/scanners/scanners/bugzilla.py | 2 +- kibble/scanners/scanners/buildbot.py | 4 ++-- kibble/scanners/scanners/git-census.py | 2 +- kibble/scanners/scanners/git-evolution.py | 2 +- kibble/scanners/scanners/git-sloc.py | 2 +- kibble/scanners/scanners/git-sync.py | 2 +- kibble/scanners/scanners/twitter.py | 2 +- kibble/scanners/utils/tone.py | 2 +- kibble/scanners/utils/urlmisc.py | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/kibble/scanners/scanners/bugzilla.py b/kibble/scanners/scanners/bugzilla.py index 6b55e4fb..dd7ae57f 100644 --- a/kibble/scanners/scanners/bugzilla.py +++ b/kibble/scanners/scanners/bugzilla.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. 
-""" This is the BugZilla scanner plugin for Kible """ +""" This is the BugZilla scanner plugin for Kibble """ import hashlib import json diff --git a/kibble/scanners/scanners/buildbot.py b/kibble/scanners/scanners/buildbot.py index 72561ff3..190fb5e3 100644 --- a/kibble/scanners/scanners/buildbot.py +++ b/kibble/scanners/scanners/buildbot.py @@ -100,7 +100,7 @@ def scanJob(KibbleBit, source, job, creds): "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(FIN)), "buildID": buildno, "completed": completed, - "duration": DUR * 1000, # Buildbot does seconds, not milis + "duration": DUR * 1000, # Buildbot does seconds, not milliseconds "job": job, "jobURL": "%s/builders/%s" % (source["sourceURL"], job), "status": status, @@ -221,7 +221,7 @@ def scan(KibbleBit, source): if data["state"] == "building": building += 1 if data.get("pendingBuilds", 0) > 0: - # All queued items, even offlined builders + # All queued items, even offline builders actualQueueSize += data.get("pendingBuilds", 0) # Only queues with an online builder (actually waiting stuff) if data["state"] == "building": diff --git a/kibble/scanners/scanners/git-census.py b/kibble/scanners/scanners/git-census.py index 33a0920a..d5074260 100644 --- a/kibble/scanners/scanners/git-census.py +++ b/kibble/scanners/scanners/git-census.py @@ -32,7 +32,7 @@ def accepts(source): """ Do we accept this source?? """ if source["type"] == "git": return True - # There are cases where we have a github repo, but don't wanna annalyze the code, just issues + # There are cases where we have a github repo, but don't wanna analyze the code, just issues if source["type"] == "github" and source.get("issuesonly", False) == False: return True return False diff --git a/kibble/scanners/scanners/git-evolution.py b/kibble/scanners/scanners/git-evolution.py index f391e421..130eabd1 100644 --- a/kibble/scanners/scanners/git-evolution.py +++ b/kibble/scanners/scanners/git-evolution.py @@ -33,7 +33,7 @@ def accepts(source): """ Do we accept this source? """ if source["type"] == "git": return True - # There are cases where we have a github repo, but don't wanna annalyze the code, just issues + # There are cases where we have a github repo, but don't wanna analyze the code, just issues if source["type"] == "github" and source.get("issuesonly", False) == False: return True return False diff --git a/kibble/scanners/scanners/git-sloc.py b/kibble/scanners/scanners/git-sloc.py index b1ef0811..d9633577 100644 --- a/kibble/scanners/scanners/git-sloc.py +++ b/kibble/scanners/scanners/git-sloc.py @@ -31,7 +31,7 @@ def accepts(source): """ Do we accept this source? """ if source["type"] == "git": return True - # There are cases where we have a github repo, but don't wanna annalyze the code, just issues + # There are cases where we have a github repo, but don't wanna analyze the code, just issues if source["type"] == "github" and source.get("issuesonly", False) == False: return True return False diff --git a/kibble/scanners/scanners/git-sync.py b/kibble/scanners/scanners/git-sync.py index 7956847b..e2e9422b 100644 --- a/kibble/scanners/scanners/git-sync.py +++ b/kibble/scanners/scanners/git-sync.py @@ -29,7 +29,7 @@ def accepts(source): """ Do we accept this source? 
""" if source["type"] == "git": return True - # There are cases where we have a github repo, but don't wanna annalyze the code, just issues + # There are cases where we have a github repo, but don't wanna analyze the code, just issues if source["type"] == "github" and source.get("issuesonly", False) == False: return True return False diff --git a/kibble/scanners/scanners/twitter.py b/kibble/scanners/scanners/twitter.py index 996c0487..de1846f8 100644 --- a/kibble/scanners/scanners/twitter.py +++ b/kibble/scanners/scanners/twitter.py @@ -113,7 +113,7 @@ def scan(KibbleBit, source): consumer_key=source["creds"].get("consumer_key", None), consumer_secret=source["creds"].get("consumer_secret", None), ) - KibbleBit.pprint("Verrifying twitter credentials...") + KibbleBit.pprint("Verifying twitter credentials...") try: t.VerifyCredentials() except: diff --git a/kibble/scanners/utils/tone.py b/kibble/scanners/utils/tone.py index 3571c89b..a41bc01a 100644 --- a/kibble/scanners/utils/tone.py +++ b/kibble/scanners/utils/tone.py @@ -181,7 +181,7 @@ def picoTone(KibbleBit, bodies): # Additional (optional) emotion weighting if "emotions" in doc: for k, v in doc["emotions"].items(): - mood[k] = v / 100 # Value is betwen 0 and 100. + mood[k] = v / 100 # Value is between 0 and 100. moods[int(doc["id"])] = mood # Replace moods[X] with the actual mood diff --git a/kibble/scanners/utils/urlmisc.py b/kibble/scanners/utils/urlmisc.py index cf0ee696..f03c5120 100644 --- a/kibble/scanners/utils/urlmisc.py +++ b/kibble/scanners/utils/urlmisc.py @@ -44,7 +44,7 @@ def unzip(url, creds=None, cookie=None): "Cookie": cookie, } request = urllib.request.Request(url, headers=headers) - # Try fechthing via python, fall back to wget (redhat == broken!) + # Try fetching via python, fall back to wget (redhat == broken!) decompressedFile = None try: result = urllib.request.urlopen(request) From f03b7edb55252c5223b24b848c1e8f80acf843ff Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 02:08:30 +0000 Subject: [PATCH 31/48] Use python:3.8-slim image for Dockerfile (#107) `python:3.8` -- 331.8 MB `python:3.8-slim` -- 41.67 MB https://hub.docker.com/_/python?tab=tags&page=1&ordering=last_updated&name=3.8 --- Dockerfile.dev | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.dev b/Dockerfile.dev index 690be03d..db8243f8 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -15,11 +15,11 @@ # specific language governing permissions and limitations # under the License. -FROM python:3.8 +FROM python:3.8-slim USER root RUN apt-get update -RUN apt-get install -y gcc unzip +RUN apt-get install -y gcc git unzip COPY . 
/kibble/ From 1876559c776757608b6233d8e5aafd29389a3ab8 Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 02:09:23 +0000 Subject: [PATCH 32/48] Fix ES Memory locking issue (#108) This is recommended https://github.com/deviantony/docker-elk/issues/243#issuecomment-363861158 and in official ES docs: https://www.elastic.co/guide/en/elastic-stack-get-started/current/get-started-docker.html --- docker-compose-dev.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker-compose-dev.yaml b/docker-compose-dev.yaml index d4d8b887..e4cbb0d4 100644 --- a/docker-compose-dev.yaml +++ b/docker-compose-dev.yaml @@ -59,6 +59,10 @@ services: cluster.initial_master_nodes: es01 cluster.name: kibble ES_JAVA_OPTS: -Xms256m -Xmx256m + ulimits: + memlock: + soft: -1 + hard: -1 volumes: - "kibble-es-data:/usr/share/elasticsearch/data" networks: From d9a3893d660517ee705948e0fbcf13c5ce1578a4 Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 02:10:18 +0000 Subject: [PATCH 33/48] Fix error when running Kibble Scanner (#109) --- kibble/scanners/brokers/kibbleES.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/kibble/scanners/brokers/kibbleES.py b/kibble/scanners/brokers/kibbleES.py index 5792dd8e..472afe59 100644 --- a/kibble/scanners/brokers/kibbleES.py +++ b/kibble/scanners/brokers/kibbleES.py @@ -335,18 +335,21 @@ def __init__(self, config): apidoc = es.get(index=es_config["database"], doc_type="api", id="current")[ "_source" ] + apidoc_db_version = int(apidoc["dbversion"]) # We currently accept and know how to use DB versions 1 and 2. - if apidoc["dbversion"] not in ACCEPTED_DB_VERSIONS: - if apidoc["dbversion"] > KIBBLE_DB_VERSION: + if apidoc_db_version not in ACCEPTED_DB_VERSIONS: + if apidoc_db_version > KIBBLE_DB_VERSION: sys.stderr.write( - "The database '%s' uses a newer structure format (version %u) than the scanners (version %u). Please upgrade your scanners.\n" - % (es_config["database"], apidoc["dbversion"], KIBBLE_DB_VERSION) + "The database '%s' uses a newer structure format (version %u) than the scanners " + "(version %u). Please upgrade your scanners.\n" + % (es_config["database"], apidoc_db_version, KIBBLE_DB_VERSION) ) sys.exit(-1) - if apidoc["dbversion"] < KIBBLE_DB_VERSION: + if apidoc_db_version < KIBBLE_DB_VERSION: sys.stderr.write( - "The database '%s' uses an older structure format (version %u) than the scanners (version %u). Please upgrade your main Kibble server.\n" - % (es_config["database"], apidoc["dbversion"], KIBBLE_DB_VERSION) + "The database '%s' uses an older structure format (version %u) than the scanners " + "(version %u). 
Please upgrade your main Kibble server.\n" + % (es_config["database"], apidoc_db_version, KIBBLE_DB_VERSION) ) sys.exit(-1) From 729256f0a02136929deac6c52708ccc922149c0b Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 02:11:12 +0000 Subject: [PATCH 34/48] Upgrade black to 20.8b1 (#112) Upgrade black pre-commit from `19.3b0` to `20.8b1` --- .pre-commit-config.yaml | 2 +- kibble/scanners/brokers/kibbleES.py | 4 ++-- kibble/scanners/kibble-scanner.py | 4 ++-- kibble/scanners/scanners/ponymail-kpe.py | 4 ++-- kibble/scanners/scanners/travis.py | 4 +--- kibble/scanners/utils/github.py | 3 +-- kibble/scanners/utils/urlmisc.py | 3 +-- 7 files changed, 10 insertions(+), 14 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e471779c..9ff35d22 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: - license-templates/LICENSE.txt - --fuzzy-match-generates-todo - repo: https://github.com/psf/black - rev: 19.3b0 + rev: 20.8b1 hooks: - id: black name: Black diff --git a/kibble/scanners/brokers/kibbleES.py b/kibble/scanners/brokers/kibbleES.py index 472afe59..0f3c9093 100644 --- a/kibble/scanners/brokers/kibbleES.py +++ b/kibble/scanners/brokers/kibbleES.py @@ -73,8 +73,8 @@ def exists(self, index): class KibbleESWrapperSeven: """ - Class for rewriting old-style queries to the new ones, - where doc_type is an integral part of the DB name and NOT USED (>= 7.x) + Class for rewriting old-style queries to the new ones, + where doc_type is an integral part of the DB name and NOT USED (>= 7.x) """ def __init__(self, ES): diff --git a/kibble/scanners/kibble-scanner.py b/kibble/scanners/kibble-scanner.py index 390a2631..6a51a41f 100644 --- a/kibble/scanners/kibble-scanner.py +++ b/kibble/scanners/kibble-scanner.py @@ -85,8 +85,8 @@ def isMine(ID, config): class scanThread(threading.Thread): - """ A thread object that grabs an item from the queue and processes - it, using whatever plugins will come out to play. """ + """A thread object that grabs an item from the queue and processes + it, using whatever plugins will come out to play.""" def __init__(self, broker, org, i, t=None, e=None): super(scanThread, self).__init__() diff --git a/kibble/scanners/scanners/ponymail-kpe.py b/kibble/scanners/scanners/ponymail-kpe.py index 7e93320e..478dfba1 100644 --- a/kibble/scanners/scanners/ponymail-kpe.py +++ b/kibble/scanners/scanners/ponymail-kpe.py @@ -28,8 +28,8 @@ version = "0.1.0" ROBITS = r"(git|gerrit|jenkins|hudson|builds|bugzilla)@" MAX_COUNT = ( - 100 -) # Max number of unparsed emails to handle (so we don't max out API credits!) + 100 # Max number of unparsed emails to handle (so we don't max out API credits!) +) def accepts(source): diff --git a/kibble/scanners/scanners/travis.py b/kibble/scanners/scanners/travis.py index d76ca571..c2157c64 100644 --- a/kibble/scanners/scanners/travis.py +++ b/kibble/scanners/scanners/travis.py @@ -327,9 +327,7 @@ def sclan(KibbleBit, source): KibbleBit.pprint("Job %u is building" % jobID) elif jobjs["state"] in ["created", "queued", "pending"]: queued += 1 - blocked += ( - 1 - ) # Queued in Travis generally means a job can't find an executor, and thus is blocked. + blocked += 1 # Queued in Travis generally means a job can't find an executor, and thus is blocked. KibbleBit.pprint("Job %u is pending" % jobID) KibbleBit.pprint("%u building, %u queued..." 
% (building, queued)) diff --git a/kibble/scanners/utils/github.py b/kibble/scanners/utils/github.py index 5284b1bf..1731fdba 100644 --- a/kibble/scanners/utils/github.py +++ b/kibble/scanners/utils/github.py @@ -29,8 +29,7 @@ def get_limited(url, params=None, auth=None): - """ Get a GitHub API response, keeping in mind that we may - be rate-limited by the abuse system """ + """Get a GitHub API response, keeping in mind that we may be rate-limited by the abuse system""" number_of_retries = 0 resp = requests.get(url, params=params, auth=auth) while resp.status_code == 403 and number_of_retries < 20: diff --git a/kibble/scanners/utils/urlmisc.py b/kibble/scanners/utils/urlmisc.py index f03c5120..e80b9f6f 100644 --- a/kibble/scanners/utils/urlmisc.py +++ b/kibble/scanners/utils/urlmisc.py @@ -27,8 +27,7 @@ def unzip(url, creds=None, cookie=None): - """ Attempts to download an unzip an archive. Returns the - temporary file path of the unzipped contents """ + """Attempts to download an unzip an archive. Returns the temporary file path of the unzipped contents""" headers = {} if creds: auth = str(base64.encodestring(bytes(creds)).replace("\n", "")) From 3485535775309330b06f6d077c2764b397aeac2d Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 02:12:10 +0000 Subject: [PATCH 35/48] Improve Code Quality (#113) - Comparison with None - Redundant Parenthesis - Remove unused variable - Fix mutable default arguments - Augmented assignment expressions --- kibble/scanners/brokers/kibbleES.py | 4 ++-- kibble/scanners/scanners/__init__.py | 2 +- kibble/scanners/scanners/bugzilla.py | 6 +++--- kibble/scanners/scanners/gerrit.py | 4 +++- kibble/scanners/scanners/git-census.py | 4 ++-- kibble/scanners/scanners/git-evolution.py | 2 +- kibble/scanners/scanners/jira.py | 6 +++--- kibble/scanners/scanners/pipermail.py | 4 ++-- kibble/scanners/scanners/ponymail-kpe.py | 2 +- kibble/scanners/scanners/ponymail-tone.py | 2 +- kibble/scanners/scanners/ponymail.py | 4 ++-- kibble/scanners/scanners/travis.py | 2 +- kibble/scanners/scanners/twitter.py | 12 ++++++------ kibble/scanners/utils/github.py | 10 +++++++--- kibble/scanners/utils/tone.py | 16 +++++----------- kibble/scanners/utils/urlmisc.py | 1 - 16 files changed, 40 insertions(+), 41 deletions(-) diff --git a/kibble/scanners/brokers/kibbleES.py b/kibble/scanners/brokers/kibbleES.py index 0f3c9093..16f999b2 100644 --- a/kibble/scanners/brokers/kibbleES.py +++ b/kibble/scanners/brokers/kibbleES.py @@ -114,7 +114,7 @@ def exists(self, index): # This is redundant, refactor later? 
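One of the cleanups listed above, the mutable-default-argument fix, deserves a quick illustration before the hunks that follow: a default such as `params={}` is built once, at function definition time, and then shared by every call, which is why the patch switches `get_all()` to the `params=None` idiom. A minimal demonstration:

```
def buggy(item, acc=[]):    # one shared list for every call
    acc.append(item)
    return acc

def fixed(item, acc=None):  # the idiom the patch adopts in get_all()
    if acc is None:
        acc = []
    acc.append(item)
    return acc

print(buggy(1), buggy(2))   # [1, 2] [1, 2]  (state leaked between calls)
print(fixed(1), fixed(2))   # [1] [2]
```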
def pprint(string, err=False): - line = "[core]: %s" % (string) + line = "[core]: %s" % string if err: sys.stderr.write(line + "\n") else: @@ -265,7 +265,7 @@ def sources(self, sourceType=None, view=None): ) for hit in res["hits"]["hits"]: - if sourceType == None or hit["_source"]["type"] == sourceType: + if sourceType is None or hit["_source"]["type"] == sourceType: s.append(hit["_source"]) return s diff --git a/kibble/scanners/scanners/__init__.py b/kibble/scanners/scanners/__init__.py index 7b51436b..771ba57d 100644 --- a/kibble/scanners/scanners/__init__.py +++ b/kibble/scanners/scanners/__init__.py @@ -58,4 +58,4 @@ def enumerate(): """ Returns the scanners as a dictionary, sorted by run-order """ for p in __all__: - yield (p, scanners[p]) + yield p, scanners[p] diff --git a/kibble/scanners/scanners/bugzilla.py b/kibble/scanners/scanners/bugzilla.py index dd7ae57f..e1f1bfb1 100644 --- a/kibble/scanners/scanners/bugzilla.py +++ b/kibble/scanners/scanners/bugzilla.py @@ -90,13 +90,13 @@ def wasclosed(js): if item["field"] == "status" and ( item["toString"] == "Closed" or item["toString"] == "Resolved" ): - return (True, citem["author"]) + return True, citem["author"] else: if "items" in js: for item in js["items"]: if item["field"] == "status" and item["toString"] == "Closed": - return (True, None) - return (False, None) + return True, None + return False, None def resolved(js): diff --git a/kibble/scanners/scanners/gerrit.py b/kibble/scanners/scanners/gerrit.py index ebd15a9e..b82dab6a 100644 --- a/kibble/scanners/scanners/gerrit.py +++ b/kibble/scanners/scanners/gerrit.py @@ -70,7 +70,9 @@ def get_commit_id(commit_message): return None -def get_all(base_url, f, params={}): +def get_all(base_url, f, params=None): + if params is None: + params = {} acc = [] while True: diff --git a/kibble/scanners/scanners/git-census.py b/kibble/scanners/scanners/git-census.py index d5074260..aa19a037 100644 --- a/kibble/scanners/scanners/git-census.py +++ b/kibble/scanners/scanners/git-census.py @@ -173,12 +173,12 @@ def scan(KibbleBit, source): alcseries[gname][ts] = {} if not ce in lcseries[gname][ts]: lcseries[gname][ts][ce] = [0, 0] - lcseries[gname][ts][ce][0] = lcseries[gname][ts][ce][0] + insert + lcseries[gname][ts][ce][0] += insert lcseries[gname][ts][ce][1] = lcseries[gname][ts][ce][0] + delete if not ae in alcseries[gname][ts]: alcseries[gname][ts][ae] = [0, 0] - alcseries[gname][ts][ae][0] = alcseries[gname][ts][ae][0] + insert + alcseries[gname][ts][ae][0] += insert alcseries[gname][ts][ae][1] = alcseries[gname][ts][ae][0] + delete if not ts in ctseries[gname]: diff --git a/kibble/scanners/scanners/git-evolution.py b/kibble/scanners/scanners/git-evolution.py index 130eabd1..07a7ada2 100644 --- a/kibble/scanners/scanners/git-evolution.py +++ b/kibble/scanners/scanners/git-evolution.py @@ -149,7 +149,7 @@ def scan(KibbleBit, source): inp = get_first_ref(gpath) if inp: ts = int(inp.split()[0]) - ts = ts - (ts % 86400) + ts -= ts % 86400 date = time.strftime("%Y-%b-%d 0:00", time.gmtime(ts)) # print("Starting from %s" % date) diff --git a/kibble/scanners/scanners/jira.py b/kibble/scanners/scanners/jira.py index efc22cf4..41dafa6b 100644 --- a/kibble/scanners/scanners/jira.py +++ b/kibble/scanners/scanners/jira.py @@ -91,15 +91,15 @@ def wasclosed(js): item["toString"].lower().find("closed") != -1 or item["toString"].lower().find("resolved") != -1 ): - return (True, citem.get("author", {})) + return True, citem.get("author", {}) else: if "items" in js: for item in js["items"]: if item["field"] 
== "status" and ( item["toString"].find("Closed") != -1 ): - return (True, None) - return (False, None) + return True, None + return False, None def resolved(js): diff --git a/kibble/scanners/scanners/pipermail.py b/kibble/scanners/scanners/pipermail.py index e361ad2e..606a97eb 100644 --- a/kibble/scanners/scanners/pipermail.py +++ b/kibble/scanners/scanners/pipermail.py @@ -89,7 +89,7 @@ def scan(KibbleBit, source): gzurl = "%s/%04u-%s.txt.gz" % (url, year, monthNames[month]) pd = datetime.date(year, month, 1).timetuple() dhash = hashlib.sha224( - (("%s %s") % (source["organisation"], gzurl)).encode( + ("%s %s" % (source["organisation"], gzurl)).encode( "ascii", errors="replace" ) ).hexdigest() @@ -218,7 +218,7 @@ def scan(KibbleBit, source): md = time.strftime("%Y/%m/%d %H:%M:%S", pd) mlhash = hashlib.sha224( ( - ("%s%s%s%s") + "%s%s%s%s" % ( key, source["sourceURL"], diff --git a/kibble/scanners/scanners/ponymail-kpe.py b/kibble/scanners/scanners/ponymail-kpe.py index 478dfba1..50edac75 100644 --- a/kibble/scanners/scanners/ponymail-kpe.py +++ b/kibble/scanners/scanners/ponymail-kpe.py @@ -115,7 +115,7 @@ def scan(KibbleBit, source): KPEs = kpe.azureKPE(KibbleBit, bodies) elif "picoapi" in KibbleBit.config: KPEs = kpe.picoKPE(KibbleBit, bodies) - if KPEs == False: + if not KPEs: KibbleBit.pprint("Hit rate limit, not trying further emails for now.") a = 0 diff --git a/kibble/scanners/scanners/ponymail-tone.py b/kibble/scanners/scanners/ponymail-tone.py index e5cc02ba..e0dea32e 100644 --- a/kibble/scanners/scanners/ponymail-tone.py +++ b/kibble/scanners/scanners/ponymail-tone.py @@ -116,7 +116,7 @@ def scan(KibbleBit, source): moods = tone.azureTone(KibbleBit, bodies) elif "picoapi" in KibbleBit.config: moods = tone.picoTone(KibbleBit, bodies) - if moods == False: + if not moods: KibbleBit.pprint("Hit rate limit, not trying further emails for now.") a = 0 diff --git a/kibble/scanners/scanners/ponymail.py b/kibble/scanners/scanners/ponymail.py index 78f0a4ae..92a5b0bc 100644 --- a/kibble/scanners/scanners/ponymail.py +++ b/kibble/scanners/scanners/ponymail.py @@ -148,7 +148,7 @@ def scan(KibbleBit, source): "%04u-%02u" % (year, month), ) dhash = hashlib.sha224( - (("%s %s") % (source["organisation"], statsurl)).encode( + ("%s %s" % (source["organisation"], statsurl)).encode( "ascii", errors="replace" ) ).hexdigest() @@ -206,7 +206,7 @@ def scan(KibbleBit, source): md = time.strftime("%Y/%m/%d %H:%M:%S", pd) mlhash = hashlib.sha224( ( - ("%s%s%s%s") + "%s%s%s%s" % (top[0], source["sourceURL"], source["organisation"], md) ).encode("ascii", errors="replace") ).hexdigest() # one unique id per month per mail thread diff --git a/kibble/scanners/scanners/travis.py b/kibble/scanners/scanners/travis.py index c2157c64..22b4975a 100644 --- a/kibble/scanners/scanners/travis.py +++ b/kibble/scanners/scanners/travis.py @@ -55,7 +55,7 @@ def scanJob(KibbleBit, source, bid, token, TLD): oURL = "https://api.travis-ci.%s/repo/%s/builds" % (TLD, bid) # For as long as pagination makes sense... 
- while last_page == False: + while not last_page: bURL = "https://api.travis-ci.%s/repo/%s/builds?limit=100&offset=%u" % ( TLD, bid, diff --git a/kibble/scanners/scanners/twitter.py b/kibble/scanners/scanners/twitter.py index de1846f8..fd6af5b2 100644 --- a/kibble/scanners/scanners/twitter.py +++ b/kibble/scanners/scanners/twitter.py @@ -47,9 +47,9 @@ def getFollowers(KibbleBit, source, t): no_followers = tuser.followers_count d = time.strftime("%Y/%m/%d 0:00:00", time.gmtime()) # Today at midnight dhash = hashlib.sha224( - ( - ("twitter:%s:%s:%s") % (source["organisation"], source["sourceURL"], d) - ).encode("ascii", errors="replace") + ("twitter:%s:%s:%s" % (source["organisation"], source["sourceURL"], d)).encode( + "ascii", errors="replace" + ) ).hexdigest() jst = { "organisation": source["organisation"], @@ -74,9 +74,9 @@ def getFollowers(KibbleBit, source, t): # Store twitter follower profile if not already logged dhash = hashlib.sha224( - ( - ("twitter:%s:%s:%s") % (source["organisation"], handle, follower.id) - ).encode("ascii", errors="replace") + ("twitter:%s:%s:%s" % (source["organisation"], handle, follower.id)).encode( + "ascii", errors="replace" + ) ).hexdigest() if not KibbleBit.exists("twitter_follow", dhash): jst = { diff --git a/kibble/scanners/utils/github.py b/kibble/scanners/utils/github.py index 1731fdba..251e9803 100644 --- a/kibble/scanners/utils/github.py +++ b/kibble/scanners/utils/github.py @@ -52,7 +52,9 @@ def get_tokens_left(auth=None): return tokens_left -def issues(source, params={}, auth=None): +def issues(source, params=None, auth=None): + if params is None: + params = {} local_params = {"per_page": 100, "page": 1} local_params.update(params) @@ -79,7 +81,9 @@ def user(user_url, auth=None): return get_limited(user_url, auth=auth) -def get_all(source, f, params={}, auth=None): +def get_all(source, f, params=None, auth=None): + if params is None: + params = {} acc = [] page = params.get("page", 1) @@ -91,7 +95,7 @@ def get_all(source, f, params={}, auth=None): acc.extend(items) - page = page + 1 + page += 1 params.update({"page": page}) return acc diff --git a/kibble/scanners/utils/tone.py b/kibble/scanners/utils/tone.py index a41bc01a..c920b5ed 100644 --- a/kibble/scanners/utils/tone.py +++ b/kibble/scanners/utils/tone.py @@ -164,20 +164,14 @@ def picoTone(KibbleBit, bodies): if "results" in jsout and len(jsout["results"]) > 0: for doc in jsout["results"]: - mood = {} + mood = { + "negative": doc["negativity"], + "positive": doc["positivity"], + "neutral": doc["neutrality"], + } # Sentiment is the overall score, and we use that for the neutrality of a text - mood["negative"] = doc[ - "negativity" - ] # Use the direct Bayesian score from picoAPI - mood["positive"] = doc[ - "positivity" - ] # Use the direct Bayesian score from picoAPI - mood["neutral"] = doc[ - "neutrality" - ] # Calc neutrality to favor a middle sentiment score, ignore high/low - # Additional (optional) emotion weighting if "emotions" in doc: for k, v in doc["emotions"].items(): diff --git a/kibble/scanners/utils/urlmisc.py b/kibble/scanners/utils/urlmisc.py index e80b9f6f..ab0db4f5 100644 --- a/kibble/scanners/utils/urlmisc.py +++ b/kibble/scanners/utils/urlmisc.py @@ -58,7 +58,6 @@ def unzip(url, creds=None, cookie=None): subprocess.check_call(("/usr/bin/wget", "-O", tmpfile.name, url)) try: - te compressedFile = open("/tmp/kibbletmp.gz", "rb") if compressedFile.read(2) == "\x1f\x8b": compressedFile.seek(0) From c14a0533279c025e58e974b6f094197d3f18422b Mon Sep 17 00:00:00 2001 From: Kaxil 
Naik Date: Sun, 13 Dec 2020 02:13:13 +0000 Subject: [PATCH 36/48] Fix unresolved reference error in kibble/api/plugins/database.py (#111) There was no such variable `ESVersion` -- possibly a typo as `self.ESversion` is defined on line 147 --- kibble/api/plugins/database.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kibble/api/plugins/database.py b/kibble/api/plugins/database.py index 57467213..f50b1765 100644 --- a/kibble/api/plugins/database.py +++ b/kibble/api/plugins/database.py @@ -138,5 +138,5 @@ def __init__(self, config: KibbleConfigParser): self.ESversion = int(self.ES.info()["version"]["number"].split(".")[0]) if self.ESversion >= 7: self.ES = KibbleESWrapperSeven(self.ES) - elif self.ESVersion >= 6: + elif self.ESversion >= 6: self.ES = KibbleESWrapper(self.ES) From 5bf37a8c0db83c918fa476a3a1b653390026e169 Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 10:37:51 +0000 Subject: [PATCH 37/48] Install cloc in Dockerfile to fix git scanner (#110) Install cloc (https://github.com/AlDanial/cloc). `cloc` is a Prerequisite as mentioned in https://apache-kibble.readthedocs.io/en/latest/setup.html#installing-the-server Without it the count in the git scanner fails. --- Dockerfile.dev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.dev b/Dockerfile.dev index db8243f8..78500b60 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -19,7 +19,7 @@ FROM python:3.8-slim USER root RUN apt-get update -RUN apt-get install -y gcc git unzip +RUN apt-get install -y gcc git unzip cloc COPY . /kibble/ From 89748e42e611842babff57199ed82b4b8175f2c6 Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 12:32:56 +0000 Subject: [PATCH 38/48] Fix failing test & Run tests on CI (#115) This PR adds Github Action job to run tests on CI & fix a failing test --- .github/workflows/ci.yaml | 10 ++++++++++ tests/test_configuration.py | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index fc571d8b..602c41e1 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -39,3 +39,13 @@ jobs: run: docker run apache/kibble kibble --help - name: Check dependencies run: docker run apache/kibble pip check + run-tests: + name: Run Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: '3.8' + - run: pip install '.[devel]' + - run: pytest tests diff --git a/tests/test_configuration.py b/tests/test_configuration.py index 32c13f1d..74e8c038 100644 --- a/tests/test_configuration.py +++ b/tests/test_configuration.py @@ -28,7 +28,7 @@ class TestDefaultConfig: ("accounts", "verify", True), ("api", "database", 2), ("api", "version", "0.1.0"), - ("elasticsearch", "conn_uri", "elasticsearch:9200"), + ("elasticsearch", "conn_uri", "http://elasticsearch:9200"), ("mail", "mailhost", "localhost:25"), ], ) From ea59ae0c18596566d037397fd733d18060b5f3f8 Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 12:44:59 +0000 Subject: [PATCH 39/48] Show colored output for Pytest & Show diff on pre-commits (#116) --- .github/workflows/ci.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 602c41e1..8ed3163b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -28,6 +28,8 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - uses: pre-commit/action@v1.0.1 + with: + extra_args: 
--show-diff-on-failure build-docker: name: Build kibble dev image runs-on: ubuntu-latest @@ -48,4 +50,4 @@ jobs: with: python-version: '3.8' - run: pip install '.[devel]' - - run: pytest tests + - run: pytest tests --color=yes From f3a4b2710e9f7466010e41d4a3ad37f9935d2456 Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Sun, 13 Dec 2020 12:47:01 +0000 Subject: [PATCH 40/48] Add scan command to run scanners (#114) Co-authored-by: Kaxil Naik --- kibble.ini | 17 +++ kibble/__main__.py | 52 ++++++++ kibble/cli/scanner_command.py | 150 +++++++++++++++++++++ kibble/scanners/README.md | 56 +++----- kibble/scanners/brokers/kibbleES.py | 88 ++++++------- kibble/scanners/kibble-scanner.py | 197 ---------------------------- 6 files changed, 275 insertions(+), 285 deletions(-) create mode 100644 kibble/cli/scanner_command.py delete mode 100644 kibble/scanners/kibble-scanner.py diff --git a/kibble.ini b/kibble.ini index 527d574b..72f0e0cb 100644 --- a/kibble.ini +++ b/kibble.ini @@ -8,6 +8,23 @@ database = 2 # Version f the API version = 0.1.0 +[broker] +enabled = false +url = https://localhost/api/ +username = kibble +password = kibble4life + +[scanner] +# scratchdir: Location for storing file objects like git repos etc +# This should be permanent to speed up scans of large repositories +# on consecutive scans, but may be ephemeral like /tmp +scratchdir = /tmp +# If you are load balancing the scans, you should specify +# how many nodes are working, and which one you are, +# using the format: $nodeNo/$totalNodes. If there are 4 nodes, +# each node will gat 1/4th of all jobs to work on. +balance = + [elasticsearch] # Elasticsearch database name dbname = kibble diff --git a/kibble/__main__.py b/kibble/__main__.py index 4b1c6b56..8e266a89 100644 --- a/kibble/__main__.py +++ b/kibble/__main__.py @@ -14,11 +14,13 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +from typing import List import click from kibble.cli import setup_command from kibble.cli.make_account_command import make_account_cmd +from kibble.cli.scanner_command import scan_cmd from kibble.configuration import conf from kibble.version import version as kibble_version @@ -100,6 +102,56 @@ def make_account( ) +@cli.command("scan", short_help="starts scanning process") +@click.option( + "-t", + "--type", + "scanners", + help="Specific type of scanner to run (default is run all scanners). Can be used multiple times.", + multiple=True, +) +@click.option( + "-e", + "--exclude", + help="Specific type of scanner(s) to exclude. Can be used multiple times.", + multiple=True, +) +@click.option( + "-o", + "--org", + help="The organisation to gather stats for. If left out, all organisations will be scanned.", +) +@click.option( + "-a", + "--age", + help="Minimum age in hours before performing a new scan on an already processed source. 
" + "--age 12 will not process any source that was processed less than 12 hours ago, but " + "will process new sources.", +) +@click.option("-s", "--source", help="Specific source (wildcard) to run scans on.") +@click.option( + "-v", + "--view", + help="Specific source view to scan (default is scan all sources).", +) +def run_scan( + scanners: List[str] = None, + exclude: List[str] = None, + org: str = None, + age: int = None, + source: str = None, + view: str = None, +): + scan_cmd( + scanners=scanners, + exclude=exclude, + org=org, + age=age, + source=source, + view=view, + ) + + def main(): cli() diff --git a/kibble/cli/scanner_command.py b/kibble/cli/scanner_command.py new file mode 100644 index 00000000..2388c0ce --- /dev/null +++ b/kibble/cli/scanner_command.py @@ -0,0 +1,150 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import multiprocessing +import threading +import time +from typing import List + +from kibble.configuration import conf +from kibble.scanners.brokers import kibbleES + +PENDING_OBJECTS = [] +BIG_LOCK = threading.Lock() + + +def is_mine(id_): + balance = conf.get("scanner", "balance") + if not balance: + return False + node_no, num_nodes = balance.split("/") + node_no, num_nodes = int(node_no), int(num_nodes) + if num_nodes == 0: + return True + bignum = int(id_, 16) % num_nodes + if bignum == node_no - 1: + return True + return False + + +class ScanThread(threading.Thread): + """ + A thread object that grabs an item from the queue and processes + it, using whatever plugins will come out to play. + """ + + def __init__(self, broker, org, i, t=None, e=None): + super(ScanThread, self).__init__() + self.broker = broker + self.org = org + self.id = i + self.bit = self.broker.bitClass(self.broker, self.org, i) + self.stype = t + self.exclude = e + print("Initialized thread %i" % i) + + def run(self): + from kibble.scanners import scanners + + global BIG_LOCK, PENDING_OBJECTS + time.sleep(0.5) # Primarily to align printouts. + # While there are objects to snag + while PENDING_OBJECTS: + BIG_LOCK.acquire(blocking=True) + try: + # Try grabbing an object (might not be any left!) + obj = PENDING_OBJECTS.pop(0) + # If load balancing jobs, make sure this one is ours + if is_mine(obj["sourceID"]): + # Run through list of scanners in order, apply when useful + for sid, scanner in scanners.enumerate(): + if scanner.accepts(obj): + self.bit.pluginname = "plugins/scanners/" + sid + # Excluded scanner type? + if self.exclude and sid in self.exclude: + continue + # Specific scanner type or no types mentioned? 
+ if not self.stype or self.stype == sid: + scanner.scan(self.bit, obj) + except: + break + finally: + BIG_LOCK.release() + self.bit.pluginname = "core" + self.bit.pprint("No more objects, exiting!") + + +def scan_cmd( + scanners: List[str] = None, + exclude: List[str] = None, + org: str = None, + age: int = None, + source: str = None, + view: str = None, +): + global PENDING_OBJECTS + + print("Kibble Scanner starting") + print("Using direct ElasticSearch broker model") + broker = kibbleES.Broker() + + org_no = 0 + source_no = 0 + for org_item in broker.organisations(): + if not org or org == org_item.id: + print(f"Processing organisation {org_item.id}") + org_no += 1 + + # Compile source list + # If --age is passed, only append source that either + # have never been scanned, or have been scanned more than + # N hours ago by any scanner. + if age: + minAge = time.time() - int(age) * 3600 + for source_item in org_item.sources(view=view): + tooNew = False + if "steps" in source_item: + for _, step in source_item["steps"].items(): + if "time" in step and step["time"] >= minAge: + tooNew = True + break + if not tooNew: + if not source or (source == source_item["sourceID"]): + PENDING_OBJECTS.append(source) + else: + PENDING_OBJECTS = [] + for source_item in org_item.sources(view=view): + if not source or (source == source_item["sourceID"]): + PENDING_OBJECTS.append(source_item) + source_no += len(PENDING_OBJECTS) + + # Start up some threads equal to number of cores on the box, + # but no more than 4. We don't want an IOWait nightmare. + threads = [] + core_count = min((4, int(multiprocessing.cpu_count()))) + for i in range(1, core_count): + s_thread = ScanThread(broker, org_item, i + 1, scanners, exclude) + s_thread.start() + threads.append(s_thread) + + # Wait for them all to finish. + for t in threads: + t.join() + + print( + f"All done scanning for now, found {org_no} organisations and {source_no} sources to process." + ) diff --git a/kibble/scanners/README.md b/kibble/scanners/README.md index d32c228b..f41eee6c 100644 --- a/kibble/scanners/README.md +++ b/kibble/scanners/README.md @@ -7,46 +7,32 @@ The Kibble Scanners collect information for the Kibble Suite. ## How to run: - - On a daily/weekly/whatever basis, run: `python3 src/kibble-scanner.py`. + - On a daily/weekly/whatever basis, run: `kibble scan`. ### Command line options: - usage: kibble-scanner.py [-h] [-o ORG] [-f CONFIG] [-a AGE] [-s SOURCE] - [-n NODES] [-t TYPE] [-e EXCLUDE [EXCLUDE ...]] - [-v VIEW] +``` +Usage: kibble scan [OPTIONS] - optional arguments: - -h, --help show this help message and exit - -o ORG, --org ORG The organisation to gather stats for. If left out, all - organisations will be scanned. - -f CONFIG, --config CONFIG - Location of the yaml config file (full path) - -a AGE, --age AGE Minimum age in hours before performing a new scan on - an already processed source. --age 12 will not process - any source that was processed less than 12 hours ago, - but will process new sources. - -s SOURCE, --source SOURCE - A specific source (wildcard) to run scans on. - -n NODES, --nodes NODES - Number of nodes in the cluster (used for load - balancing) - -t TYPE, --type TYPE Specific type of scanner to run (default is run all - scanners) - -e EXCLUDE [EXCLUDE ...], --exclude EXCLUDE [EXCLUDE ...] 
- Specific type of scanner(s) to exclude - -v VIEW, --view VIEW Specific source view to scan (default is scan all - sources) +Options: + -t, --type TEXT Specific type of scanner to run (default is run all + scanners) + -e, --exclude TEXT Specific type of scanner(s) to exclude + -o, --org TEXT The organisation to gather stats for. If left out, all + organisations will be scanned. -## Directory structure: + -a, --age TEXT Minimum age in hours before performing a new scan on an + already processed source. --age 12 will not process any + source that was processed less than 12 hours ago, but + will process new sources. - - `conf/`: Config files - - `src/`: - - - `kibble-scanner.py`: Main script for launching scans - - - `plugins/`: - - - - `brokers`: The various database brokers (ES or JSON API) - - - - `utils`: Utility libraries - - - - `scanners`: The individual scanner applications + -s, --source TEXT Specific source (wildcard) to run scans on. + -v, --view TEXT Specific source view to scan (default is scan all + sources) + + --help Show this message and exit. +``` ## Currently available scanner plugins: @@ -74,7 +60,3 @@ The Kibble Scanners collect information for the Kibble Suite. - python3-elasticsearch - python3-certifi - python3-yaml - - -# Get involved - TBD. Please see https://kibble.apache.org/ for details! diff --git a/kibble/scanners/brokers/kibbleES.py b/kibble/scanners/brokers/kibbleES.py index 16f999b2..4d372f63 100644 --- a/kibble/scanners/brokers/kibbleES.py +++ b/kibble/scanners/brokers/kibbleES.py @@ -16,10 +16,13 @@ # under the License. import sys +from urllib.parse import urlparse import elasticsearch import elasticsearch.helpers +from kibble.configuration import conf + KIBBLE_DB_VERSION = 2 # Current DB struct version ACCEPTED_DB_VERSIONS = [1, 2] # Versions we know how to work with. @@ -112,15 +115,6 @@ def exists(self, index): return self.ES.indices.exists(index=index) -# This is redundant, refactor later? -def pprint(string, err=False): - line = "[core]: %s" % string - if err: - sys.stderr.write(line + "\n") - else: - print(line) - - class KibbleBit: """ KibbleBit class with direct ElasticSearch access """ @@ -189,7 +183,7 @@ def append(self, t, doc): self.json_queue.append(doc) # If we've crossed the bulk limit, do a push if len(self.json_queue) > self.queueMax: - pprint("Bulk push forced") + print("Bulk push forced") self.bulk() def bulk(self): @@ -228,7 +222,7 @@ def bulk(self): try: elasticsearch.helpers.bulk(self.broker.oDB, js_arr) except Exception as err: - pprint("Warning: Could not bulk insert: %s" % err) + print("Warning: Could not bulk insert: %s" % err) class KibbleOrganisation: @@ -270,27 +264,27 @@ def sources(self, sourceType=None, view=None): return s -""" Master Kibble Broker Class for direct ElasticSearch access """ +class Broker: + """Master Kibble Broker Class for direct ElasticSearch access.""" + + def __init__(self): + conn_uri = conf.get("elasticsearch", "conn_uri") + parsed = urlparse(conf.get("elasticsearch", "conn_uri")) + self.dbname = conf.get("elasticsearch", "dbname") + user = conf.get("elasticsearch", "user", fallback=None) + password = conf.get("elasticsearch", "password", fallback=None) + auth = (user, password) if user else None -class Broker: - def __init__(self, config): - es_config = config["elasticsearch"] - auth = None - if "user" in es_config: - auth = (es_config["user"], es_config["password"]) - pprint( - "Connecting to ElasticSearch database at %s:%i..." 
- % (es_config["hostname"], es_config.get("port", 9200)) - ) + print(f"Connecting to ElasticSearch database at {conn_uri}") es = elasticsearch.Elasticsearch( [ { - "host": es_config["hostname"], - "port": int(es_config.get("port", 9200)), - "use_ssl": es_config.get("ssl", False), + "host": parsed.hostname, + "port": parsed.port, + "use_ssl": conf.getboolean("elasticsearch", "ssl"), "verify_certs": False, - "url_prefix": es_config.get("uri", ""), + "url_prefix": conf.get("elasticsearch", "uri"), "http_auth": auth, } ], @@ -298,10 +292,9 @@ def __init__(self, config): retry_on_timeout=True, ) es_info = es.info() - pprint("Connected!") + print("Connected!") self.DB = es self.oDB = es # Original ES class, always. the .DB may change - self.config = config self.bitClass = KibbleBit # This bit is required since ES 6.x and above don't like document types self.noTypes = ( @@ -311,54 +304,47 @@ def __init__(self, config): True if int(es_info["version"]["number"].split(".")[0]) >= 7 else False ) if self.noTypes: - pprint("This is a type-less DB, expanding database names instead.") + print("This is a type-less DB, expanding database names instead.") if self.seven: - pprint("We're using ES >= 7.x, NO DOC_TYPE!") + print("We're using ES >= 7.x, NO DOC_TYPE!") es = KibbleESWrapperSeven(es) else: es = KibbleESWrapper(es) self.DB = es - if not es.indices.exists(index=es_config["database"] + "_api"): - sys.stderr.write( - "Could not find database group %s_* in ElasticSearch!\n" - % es_config["database"] + if not es.indices.exists(index=self.dbname + "_api"): + raise SystemExit( + f"Could not find database group {self.dbname}_* in ElasticSearch!" ) - sys.exit(-1) else: - pprint("This DB supports types, utilizing..") - if not es.indices.exists(index=es_config["database"]): - sys.stderr.write( - "Could not find database %s in ElasticSearch!\n" - % es_config["database"] + print("This DB supports types, utilizing..") + if not es.indices.exists(index=self.dbname): + raise SystemExit( + f"Could not find database {self.dbname} in ElasticSearch!" ) - sys.exit(-1) - apidoc = es.get(index=es_config["database"], doc_type="api", id="current")[ - "_source" - ] + apidoc = es.get(index=self.dbname, doc_type="api", id="current")["_source"] apidoc_db_version = int(apidoc["dbversion"]) + # We currently accept and know how to use DB versions 1 and 2. if apidoc_db_version not in ACCEPTED_DB_VERSIONS: if apidoc_db_version > KIBBLE_DB_VERSION: - sys.stderr.write( + raise SystemExit( "The database '%s' uses a newer structure format (version %u) than the scanners " "(version %u). Please upgrade your scanners.\n" - % (es_config["database"], apidoc_db_version, KIBBLE_DB_VERSION) + % (self.dbname, apidoc_db_version, KIBBLE_DB_VERSION) ) - sys.exit(-1) if apidoc_db_version < KIBBLE_DB_VERSION: - sys.stderr.write( + raise SystemExit( "The database '%s' uses an older structure format (version %u) than the scanners " "(version %u). Please upgrade your main Kibble server.\n" - % (es_config["database"], apidoc_db_version, KIBBLE_DB_VERSION) + % (self.dbname, apidoc_db_version, KIBBLE_DB_VERSION) ) - sys.exit(-1) def organisations(self): """ Return a list of all organisations """ # Run the search, fetch all orgs, 9999 max. TODO: Scroll??? 
res = self.DB.search( - index=self.config["elasticsearch"]["database"], + index=self.dbname, doc_type="organisation", size=9999, body={"query": {"match_all": {}}}, diff --git a/kibble/scanners/kibble-scanner.py b/kibble/scanners/kibble-scanner.py deleted file mode 100644 index 6a51a41f..00000000 --- a/kibble/scanners/kibble-scanner.py +++ /dev/null @@ -1,197 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import argparse -import multiprocessing -import os -import threading -import time -from pprint import pprint - -import yaml - -from kibble.scanners import scanners -from kibble.scanners.brokers import kibbleES - -VERSION = "0.1.0" -CONFIG_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.yaml") -PENDING_OBJECTS = [] -BIG_LOCK = threading.Lock() - - -def base_parser(): - arg_parser = argparse.ArgumentParser() - arg_parser.add_argument( - "-o", - "--org", - help="The organisation to gather stats for. If left out, all organisations will be scanned.", - ) - arg_parser.add_argument( - "-f", "--config", help="Location of the yaml config file (full path)" - ) - arg_parser.add_argument( - "-a", - "--age", - help="Minimum age in hours before performing a new scan on an already processed source. --age 12 will not process any source that was processed less than 12 hours ago, but will process new sources.", - ) - arg_parser.add_argument( - "-s", "--source", help="A specific source (wildcard) to run scans on." - ) - arg_parser.add_argument( - "-n", "--nodes", help="Number of nodes in the cluster (used for load balancing)" - ) - arg_parser.add_argument( - "-t", - "--type", - help="Specific type of scanner to run (default is run all scanners)", - ) - arg_parser.add_argument( - "-e", "--exclude", nargs="+", help="Specific type of scanner(s) to exclude" - ) - arg_parser.add_argument( - "-v", - "--view", - help="Specific source view to scan (default is scan all sources)", - ) - return arg_parser - - -def isMine(ID, config): - if config["scanner"].get("balance", None): - a = config["scanner"]["balance"].split("/") - nodeNo = int(a[0]) - numNodes = int(a[1]) - if numNodes == 0: - return True - bignum = int(ID, 16) % numNodes - if bignum == int(nodeNo) - 1: - return True - return False - return True - - -class scanThread(threading.Thread): - """A thread object that grabs an item from the queue and processes - it, using whatever plugins will come out to play.""" - - def __init__(self, broker, org, i, t=None, e=None): - super(scanThread, self).__init__() - self.broker = broker - self.org = org - self.id = i - self.bit = self.broker.bitClass(self.broker, self.org, i) - self.stype = t - self.exclude = e - pprint("Initialized thread %i" % i) - - def run(self): - global BIG_LOCK, PENDING_OBJECTS - time.sleep(0.5) # Primarily to align printouts. 
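Both the removed loop below and its replacement in `scanner_command.py` guard a shared `PENDING_OBJECTS` list with an explicit lock. For comparison, the standard library's `queue.Queue` makes the pop itself atomic; a sketch of the same consume-until-empty pattern under that approach:

```
import queue
import threading

pending = queue.Queue()
for n in range(10):
    pending.put({"sourceID": "%056x" % n})  # toy stand-ins for source docs

def worker():
    while True:
        try:
            obj = pending.get_nowait()  # atomic pop, no explicit lock needed
        except queue.Empty:
            return
        # ... scan obj here, outside any lock ...
        pending.task_done()

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```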
- # While there are objects to snag - while PENDING_OBJECTS: - BIG_LOCK.acquire(blocking=True) - try: - # Try grabbing an object (might not be any left!) - obj = PENDING_OBJECTS.pop(0) - except: - pass - BIG_LOCK.release() - if obj: - # If load balancing jobs, make sure this one is ours - if isMine(obj["sourceID"], self.broker.config): - # Run through list of scanners in order, apply when useful - for sid, scanner in scanners.enumerate(): - - if scanner.accepts(obj): - self.bit.pluginname = "plugins/scanners/" + sid - # Excluded scanner type? - if self.exclude and sid in self.exclude: - continue - # Specific scanner type or no types mentioned? - if not self.stype or self.stype == sid: - scanner.scan(self.bit, obj) - else: - break - self.bit.pluginname = "core" - self.bit.pprint("No more objects, exiting!") - - -def main(): - pprint("Kibble Scanner v/%s starting" % VERSION) - global CONFIG_FILE, PENDING_OBJECTS - args = base_parser().parse_args() - - # Load config yaml - if args.config: - CONFIG_FILE = args.config - config = yaml.load(open(CONFIG_FILE)) - pprint("Loaded YAML config from %s" % CONFIG_FILE) - - pprint("Using direct ElasticSearch broker model") - broker = kibbleES.Broker(config) - - orgNo = 0 - sourceNo = 0 - for org in broker.organisations(): - if not args.org or args.org == org.id: - pprint("Processing organisation %s" % org.id) - orgNo += 1 - - # Compile source list - # If --age is passed, only append source that either - # have never been scanned, or have been scanned more than - # N hours ago by any scanner. - if args.age: - minAge = time.time() - int(args.age) * 3600 - for source in org.sources(view=args.view): - tooNew = False - if "steps" in source: - for _, step in source["steps"].items(): - if "time" in step and step["time"] >= minAge: - tooNew = True - break - if not tooNew: - if not args.source or (args.source == source["sourceID"]): - PENDING_OBJECTS.append(source) - else: - PENDING_OBJECTS = [] - for source in org.sources(view=args.view): - if not args.source or (args.source == source["sourceID"]): - PENDING_OBJECTS.append(source) - sourceNo += len(PENDING_OBJECTS) - - # Start up some threads equal to number of cores on the box, - # but no more than 4. We don't want an IOWait nightmare. - threads = [] - core_count = min((4, int(multiprocessing.cpu_count()))) - for i in range(0, core_count): - sThread = scanThread(broker, org, i + 1, args.type, args.exclude) - sThread.start() - threads.append(sThread) - - # Wait for them all to finish. - for t in threads: - t.join() - - pprint( - "All done scanning for now, found %i organisations and %i sources to process." 
- % (orgNo, sourceNo) - ) - - -if __name__ == "__main__": - main() From b4025c27196bcaabf237923fd3580cd91287eacc Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 12:54:57 +0000 Subject: [PATCH 41/48] Fix name of config.yml in Github Issue Template (#117) https://docs.github.com/en/free-pro-team@latest/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser The file name should be `config.yml` not `config.ymal` --- .github/ISSUE_TEMPLATE/{config.ymal => config.yml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/ISSUE_TEMPLATE/{config.ymal => config.yml} (100%) diff --git a/.github/ISSUE_TEMPLATE/config.ymal b/.github/ISSUE_TEMPLATE/config.yml similarity index 100% rename from .github/ISSUE_TEMPLATE/config.ymal rename to .github/ISSUE_TEMPLATE/config.yml From f042a599c034ff7003f1c5b2662a2cbc8b44bff8 Mon Sep 17 00:00:00 2001 From: Kaxil Naik Date: Sun, 13 Dec 2020 12:56:37 +0000 Subject: [PATCH 42/48] Add missing __init__.py file in plugins & cli directory (#118) --- kibble/api/plugins/__init__.py | 16 ++++++++++++++++ kibble/cli/__init__.py | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 kibble/api/plugins/__init__.py create mode 100644 kibble/cli/__init__.py diff --git a/kibble/api/plugins/__init__.py b/kibble/api/plugins/__init__.py new file mode 100644 index 00000000..13a83393 --- /dev/null +++ b/kibble/api/plugins/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/kibble/cli/__init__.py b/kibble/cli/__init__.py new file mode 100644 index 00000000..13a83393 --- /dev/null +++ b/kibble/cli/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
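An aside on the kibble-scanner.py entry point deleted earlier in this series: its isMine() check balanced work across scanner nodes by hashing each hexadecimal sourceID modulo the node count, so every source is claimed by exactly one node without any coordination. A minimal, self-contained sketch of that scheme (function and argument names here are illustrative, not from the patch):

    def is_mine(source_id: str, balance: str = "1/1") -> bool:
        # balance is "node_no/num_nodes", e.g. "2/4" for node 2 of 4,
        # mirroring the scanner's "balance" config option.
        node_no, num_nodes = (int(part) for part in balance.split("/"))
        if num_nodes == 0:  # balancing disabled
            return True
        return int(source_id, 16) % num_nodes == node_no - 1

    # Each hex source ID lands on exactly one of the four nodes:
    owners = [n for n in range(1, 5) if is_mine("deadbeef", "%u/4" % n)]
    assert owners == [4]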
From 257992d8d26f9d40086398a8edbab5be1a905bf5 Mon Sep 17 00:00:00 2001
From: Kaxil Naik
Date: Sun, 13 Dec 2020 13:26:28 +0000
Subject: [PATCH 43/48] Fix the path for the scanners in scanners/README.md
 (#119)

The paths were incorrect; they appear to date from when the scanners lived
in a separate repo.
---
 kibble/scanners/README.md | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/kibble/scanners/README.md b/kibble/scanners/README.md
index f41eee6c..87da385d 100644
--- a/kibble/scanners/README.md
+++ b/kibble/scanners/README.md
@@ -36,21 +36,21 @@ Options:

 ## Currently available scanner plugins:

- - Apache Pony Mail (`plugins/scanners/ponymail.py`)
- - Atlassian JIRA (`plugins/scanners/jira.py`)
- - BugZilla Issue Tracker (`plugins/scanners/bugzilla.py`)
- - BuildBot (`plugins/scanners/buildbot.py`)
- - Discourse (`plugins/scanners/discourse.py`)
- - Gerrit Code Review (`plugins/scanners/gerrit.py`)
- - Git Repository Fetcher (`plugins/scanners/git-sync.py`)
- - Git Census Counter (`plugins/scanners/git-census.py`)
- - Git Code Evolution Counter (`plugins/scanners/git-evolution.py`)
- - Git SLoC Counter (`plugins/scanners/git-sloc.py`)
- - GitHub Issues/PRs (`plugins/scanners/github.py`)
- - GitHub Traffic Statistics (`plugins/scanners/github-stats.py`)
- - GNU Mailman Pipermail (`plugins/scanners/pipermail.py`)
- - Jenkins (`plugins/scanners/jenkins.py`)
- - Travis CI (`plugins/scanners/travis.py`)
+ - Apache Pony Mail (`scanners/ponymail.py`)
+ - Atlassian JIRA (`scanners/jira.py`)
+ - BugZilla Issue Tracker (`scanners/bugzilla.py`)
+ - BuildBot (`scanners/buildbot.py`)
+ - Discourse (`scanners/discourse.py`)
+ - Gerrit Code Review (`scanners/gerrit.py`)
+ - Git Repository Fetcher (`scanners/git-sync.py`)
+ - Git Census Counter (`scanners/git-census.py`)
+ - Git Code Evolution Counter (`scanners/git-evolution.py`)
+ - Git SLoC Counter (`scanners/git-sloc.py`)
+ - GitHub Issues/PRs (`scanners/github.py`)
+ - GitHub Traffic Statistics (`scanners/github-stats.py`)
+ - GNU Mailman Pipermail (`scanners/pipermail.py`)
+ - Jenkins (`scanners/jenkins.py`)
+ - Travis CI (`scanners/travis.py`)

 ## Requirements:

From f8d731cedf51fcd5a19e05c8ed8db919f1c7838a Mon Sep 17 00:00:00 2001
From: Tomek Urbaszek
Date: Sun, 13 Dec 2020 13:53:37 +0000
Subject: [PATCH 44/48] Improve code quality of scanners (#120)

This PR replaces camelCase with snake_case, removes some unused variables,
and initializes some missing ones whose absence could, in some cases, cause
a runtime error.
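On the "initializes some missing ones" point: several scanners bound a name only inside a `try:` block, so if the block raised before the assignment, a later reference failed with UnboundLocalError (a NameError) — see, for instance, `obj` in the run loop of the kibble-scanner.py deleted earlier in this series. A minimal sketch of the bug and the fix, with illustrative names rather than code from this patch:

    pending = []

    def broken():
        try:
            obj = pending.pop(0)   # IndexError on an empty list
        except IndexError:
            pass
        return bool(obj)           # UnboundLocalError if pop() raised

    def fixed():
        obj = None                 # bind the name up front, as the refactor does
        try:
            obj = pending.pop(0)
        except IndexError:
            pass
        return bool(obj)           # safely False when nothing was popped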
--- kibble/scanners/brokers/kibbleES.py | 2 +- kibble/scanners/scanners/bugzilla.py | 98 ++++++------ kibble/scanners/scanners/buildbot.py | 58 +++---- kibble/scanners/scanners/discourse.py | 62 ++++---- kibble/scanners/scanners/gerrit.py | 26 ++-- kibble/scanners/scanners/git-census.py | 27 ++-- kibble/scanners/scanners/git-evolution.py | 40 ++--- kibble/scanners/scanners/git-sloc.py | 14 +- kibble/scanners/scanners/git-sync.py | 36 ++--- kibble/scanners/scanners/github-issues.py | 52 ++++--- kibble/scanners/scanners/github-stats.py | 24 +-- kibble/scanners/scanners/jenkins.py | 93 ++++++------ kibble/scanners/scanners/jira.py | 176 +++++++++++----------- kibble/scanners/scanners/pipermail.py | 56 ++++--- kibble/scanners/scanners/ponymail-kpe.py | 43 +++--- kibble/scanners/scanners/ponymail-tone.py | 50 +++--- kibble/scanners/scanners/ponymail.py | 83 +++++----- kibble/scanners/scanners/travis.py | 158 +++++++++---------- kibble/scanners/scanners/twitter.py | 36 ++--- 19 files changed, 561 insertions(+), 573 deletions(-) diff --git a/kibble/scanners/brokers/kibbleES.py b/kibble/scanners/brokers/kibbleES.py index 4d372f63..5e6eb0db 100644 --- a/kibble/scanners/brokers/kibbleES.py +++ b/kibble/scanners/brokers/kibbleES.py @@ -141,7 +141,7 @@ def pprint(self, string, err=False): else: print(line) - def updateSource(self, source): + def update_source(self, source): """ Updates a source document, usually with a status update """ self.broker.DB.index( index=self.broker.config["elasticsearch"]["database"], diff --git a/kibble/scanners/scanners/bugzilla.py b/kibble/scanners/scanners/bugzilla.py index e1f1bfb1..fa1833ce 100644 --- a/kibble/scanners/scanners/bugzilla.py +++ b/kibble/scanners/scanners/bugzilla.py @@ -43,7 +43,7 @@ def accepts(source): return False -def getTime(string): +def get_time(string): return time.mktime( time.strptime(re.sub(r"[zZ]", "", str(string)), "%Y-%m-%dT%H:%M:%S") ) @@ -81,7 +81,7 @@ def moved(js): return False -def wasclosed(js): +def was_closed(js): if "changelog" in js: cjs = js["changelog"]["histories"] for citem in cjs: @@ -118,7 +118,7 @@ def pchange(js): return False -def scanTicket(bug, KibbleBit, source, openTickets, u, dom): +def scan_ticket(bug, kibble_bit, source, open_tickets, u, dom): try: key = bug["id"] dhash = hashlib.sha224( @@ -126,24 +126,24 @@ def scanTicket(bug, KibbleBit, source, openTickets, u, dom): "ascii", errors="replace" ) ).hexdigest() - found = KibbleBit.exists("issue", dhash) + found = kibble_bit.exists("issue", dhash) parseIt = False if not found: parseIt = True else: - ticket = KibbleBit.get("issue", dhash) - if ticket["status"] == "closed" and key in openTickets: - KibbleBit.pprint("Ticket was reopened, reparsing") + ticket = kibble_bit.get("issue", dhash) + if ticket["status"] == "closed" and key in open_tickets: + kibble_bit.pprint("Ticket was reopened, reparsing") parseIt = True - elif ticket["status"] == "open" and not key in openTickets: - KibbleBit.pprint("Ticket was recently closed, parsing it") + elif ticket["status"] == "open" and not key in open_tickets: + kibble_bit.pprint("Ticket was recently closed, parsing it") parseIt = True else: pass # print("Ticket hasn't changed, ignoring...") if parseIt: - KibbleBit.pprint("Parsing data from BugZilla for #%s" % key) + kibble_bit.pprint("Parsing data from BugZilla for #%s" % key) params = {"ids": [int(key)], "limit": 0} if ( @@ -163,12 +163,12 @@ def scanTicket(bug, KibbleBit, source, openTickets, u, dom): js = js["result"]["bugs"][0] creator = {"name": bug["creator"], "email": 
js["creator"]} closer = {} - cd = getTime(js["creation_time"]) + cd = get_time(js["creation_time"]) rd = None status = "open" if js["status"] in ["CLOSED", "RESOLVED"]: status = "closed" - KibbleBit.pprint("%s was closed, finding out who did that" % key) + kibble_bit.pprint("%s was closed, finding out who did that" % key) ticketsURL = "%s?method=Bug.history¶ms=[%s]" % ( u, urllib.parse.quote(json.dumps(params)), @@ -182,10 +182,10 @@ def scanTicket(bug, KibbleBit, source, openTickets, u, dom): and "added" in change and change["added"] in ["CLOSED", "RESOLVED"] ): - rd = getTime(item["when"]) + rd = get_time(item["when"]) closer = {"name": item["who"], "email": item["who"]} break - KibbleBit.pprint("Counting comments for %s..." % key) + kibble_bit.pprint("Counting comments for %s..." % key) ticketsURL = "%s?method=Bug.comments¶ms=[%s]" % ( u, urllib.parse.quote(json.dumps(params)), @@ -202,7 +202,7 @@ def scanTicket(bug, KibbleBit, source, openTickets, u, dom): "ascii", errors="replace" ) ).hexdigest() - found = KibbleBit.exists("person", pid) + found = kibble_bit.exists("person", pid) if not found: params["names"] = [closer["email"]] ticketsURL = "%s?method=User.get¶ms=[%s]" % ( @@ -213,7 +213,7 @@ def scanTicket(bug, KibbleBit, source, openTickets, u, dom): try: ujs = jsonapi.get(ticketsURL) displayName = ujs["result"]["users"][0]["real_name"] - except: + except: # pylint: disable=bare-except displayName = closer["email"] if displayName and len(displayName) > 0: # Add to people db @@ -225,7 +225,7 @@ def scanTicket(bug, KibbleBit, source, openTickets, u, dom): "id": pid, } # print("Updating person DB for closer: %s (%s)" % (displayName, closerEmail)) - KibbleBit.index("person", pid, jsp) + kibble_bit.index("person", pid, jsp) if creator: pid = hashlib.sha1( @@ -233,7 +233,7 @@ def scanTicket(bug, KibbleBit, source, openTickets, u, dom): "ascii", errors="replace" ) ).hexdigest() - found = KibbleBit.exists("person", pid) + found = kibble_bit.exists("person", pid) if not found: if not creator["name"]: params["names"] = [creator["email"]] @@ -244,7 +244,7 @@ def scanTicket(bug, KibbleBit, source, openTickets, u, dom): try: ujs = jsonapi.get(ticketsURL) creator["name"] = ujs["result"]["users"][0]["real_name"] - except: + except: # pylint: disable=bare-except creator["name"] = creator["email"] if creator["name"] and len(creator["name"]) > 0: # Add to people db @@ -255,7 +255,7 @@ def scanTicket(bug, KibbleBit, source, openTickets, u, dom): "organisation": source["organisation"], "id": pid, } - KibbleBit.index("person", pid, jsp) + kibble_bit.index("person", pid, jsp) jso = { "id": dhash, @@ -280,17 +280,17 @@ def scanTicket(bug, KibbleBit, source, openTickets, u, dom): "comments": comments, "title": title, } - KibbleBit.append("issue", jso) + kibble_bit.append("issue", jso) time.sleep(0.5) # BugZilla is notoriously slow. 
         return True
     except Exception as err:
-        KibbleBit.pprint(err)
+        kibble_bit.pprint(err)
         return False
-class bzThread(Thread):
+class BzThread(Thread):
     def __init__(self, KibbleBit, source, block, pt, ot, u, dom):
-        super(bzThread, self).__init__()
+        super(BzThread, self).__init__()
         self.KibbleBit = KibbleBit
         self.source = source
         self.block = block
@@ -300,9 +300,9 @@ def __init__(self, KibbleBit, source, block, pt, ot, u, dom):
         self.dom = dom
     def run(self):
-        badOnes = 0
+        bad_ones = 0
-        while len(self.pendingTickets) > 0 and badOnes <= 50:
+        while len(self.pendingTickets) > 0 and bad_ones <= 50:
             if len(self.pendingTickets) % 10 == 0:
                 self.KibbleBit.pprint(
                     "%u elements left to count" % len(self.pendingTickets)
@@ -317,12 +317,12 @@ def run(self):
                 self.block.release()
                 return
             self.block.release()
-            if not scanTicket(
+            if not scan_ticket(
                 rl, self.KibbleBit, self.source, self.openTickets, self.u, self.dom
             ):
                 self.KibbleBit.pprint("Ticket %s seems broken, skipping" % rl["id"])
-                badOnes += 1
-                if badOnes > 50:
+                bad_ones += 1
+                if bad_ones > 50:
                     self.KibbleBit.pprint("Too many errors, bailing!")
                     self.source["steps"]["issues"] = {
                         "time": time.time(),
@@ -331,13 +331,13 @@ def run(self):
                         "running": False,
                         "good": False,
                     }
-                    self.KibbleBit.updateSource(self.source)
+                    self.KibbleBit.update_source(self.source)
                     return
             else:
-                badOnes = 0
+                bad_ones = 0
-def scan(KibbleBit, source):
+def scan(kibble_bit, source):
     url = source["sourceURL"]
     source["steps"]["issues"] = {
@@ -346,7 +346,7 @@ def scan(KibbleBit, source):
         "running": True,
         "good": True,
     }
-    KibbleBit.updateSource(source)
+    kibble_bit.update_source(source)
     bz = re.match(r"(https?://\S+?)(/jsonrpc\.cgi)?[\s:?]+(.+)", url)
     if bz:
@@ -357,8 +357,8 @@
             and len(source["creds"]["username"]) > 0
         ):
             creds = "%s:%s" % (source["creds"]["username"], source["creds"]["password"])
-        pendingTickets = []
-        openTickets = []
+        pending_tickets = []
+        open_tickets = []
         # Get base URL, list and domain to parse
         dom = bz.group(1)
@@ -404,20 +404,20 @@
             "offset": 1,
         }
-        ticketsURL = "%s?method=Bug.search&params=[%s]" % (
+        tickets_url = "%s?method=Bug.search&params=[%s]" % (
             u,
             urllib.parse.quote(json.dumps(params)),
        )
         while True:
             try:
-                js = jsonapi.get(ticketsURL, auth=creds)
-            except:
-                KibbleBit.pprint("Couldn't fetch more tickets, bailing")
+                js = jsonapi.get(tickets_url, auth=creds)
+            except:  # pylint: disable=bare-except
+                kibble_bit.pprint("Couldn't fetch more tickets, bailing")
                 break
             if len(js["result"]["bugs"]) > 0:
-                KibbleBit.pprint(
+                kibble_bit.pprint(
                     "%s: Found %u tickets..."
                     % (
                         source["sourceURL"],
@@ -425,28 +425,30 @@
                     )
                 )
                 for bug in js["result"]["bugs"]:
-                    pendingTickets.append(bug)
+                    pending_tickets.append(bug)
                     if not bug["status"] in ["RESOLVED", "CLOSED"]:
-                        openTickets.append(bug["id"])
+                        open_tickets.append(bug["id"])
                 params["offset"] += 10000
-                ticketsURL = "%s?method=Bug.search&params=[%s]" % (
+                tickets_url = "%s?method=Bug.search&params=[%s]" % (
                     u,
                     urllib.parse.quote(json.dumps(params)),
                 )
             else:
-                KibbleBit.pprint("No more tickets left to scan")
+                kibble_bit.pprint("No more tickets left to scan")
                 break
-    KibbleBit.pprint(
+    kibble_bit.pprint(
         "Found %u open tickets, %u closed."
- % (len(openTickets), len(pendingTickets) - len(openTickets)) + % (len(open_tickets), len(pending_tickets) - len(open_tickets)) ) block = Lock() threads = [] # TODO: Fix this loop for i in range(0, 4): - t = bzThread(KibbleBit, source, block, pendingTickets, openTickets, u, dom) + t = BzThread( + kibble_bit, source, block, pending_tickets, open_tickets, u, dom + ) threads.append(t) t.start() @@ -460,4 +462,4 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/buildbot.py b/kibble/scanners/scanners/buildbot.py index 190fb5e3..6d2e18d2 100644 --- a/kibble/scanners/scanners/buildbot.py +++ b/kibble/scanners/scanners/buildbot.py @@ -38,7 +38,7 @@ def accepts(source): return False -def scanJob(KibbleBit, source, job, creds): +def scan_job(KibbleBit, source, job, creds): """ Scans a single job for activity """ dhash = hashlib.sha224( ("%s-%s-%s" % (source["organisation"], source["sourceID"], job)).encode( @@ -64,7 +64,7 @@ def scanJob(KibbleBit, source, job, creds): builddoc = None try: builddoc = KibbleBit.get("ci_build", buildhash) - except: + except: # pylint: disable=bare-except pass # If this build already completed, no need to parse it again @@ -120,11 +120,11 @@ def scanJob(KibbleBit, source, job, creds): return False -class buildbotThread(threading.Thread): +class BuildbotThread(threading.Thread): """ Generic thread class for scheduling multiple scans at once """ def __init__(self, block, KibbleBit, source, creds, jobs): - super(buildbotThread, self).__init__() + super(BuildbotThread, self).__init__() self.block = block self.KibbleBit = KibbleBit self.creds = creds @@ -132,8 +132,8 @@ def __init__(self, block, KibbleBit, source, creds, jobs): self.jobs = jobs def run(self): - badOnes = 0 - while len(self.jobs) > 0 and badOnes <= 50: + bad_ones = 0 + while len(self.jobs) > 0 and bad_ones <= 50: self.block.acquire() try: job = self.jobs.pop(0) @@ -144,10 +144,10 @@ def run(self): self.block.release() return self.block.release() - if not scanJob(self.KibbleBit, self.source, job, self.creds): + if not scan_job(self.KibbleBit, self.source, job, self.creds): self.KibbleBit.pprint("[%s] This borked, trying another one" % job) - badOnes += 1 - if badOnes > 100: + bad_ones += 1 + if bad_ones > 100: self.KibbleBit.pprint("Too many errors, bailing!") self.source["steps"]["ci"] = { "time": time.time(), @@ -156,13 +156,13 @@ def run(self): "running": False, "good": False, } - self.KibbleBit.updateSource(self.source) + self.KibbleBit.update_source(self.source) return else: - badOnes = 0 + bad_ones = 0 -def scan(KibbleBit, source): +def scan(kibble_bit, source): # Simple URL check buildbot = re.match(r"(https?://.+)", source["sourceURL"]) if buildbot: @@ -173,16 +173,16 @@ def scan(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) - KibbleBit.pprint("Parsing Buildbot activity at %s" % source["sourceURL"]) + kibble_bit.pprint("Parsing Buildbot activity at %s" % source["sourceURL"]) source["steps"]["ci"] = { "time": time.time(), "status": "Downloading changeset", "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) # Buildbot may neeed credentials creds = None @@ -195,9 +195,9 @@ def scan(KibbleBit, source): creds = "%s:%s" % (source["creds"]["username"], source["creds"]["password"]) # Get the job list - sURL = source["sourceURL"] - KibbleBit.pprint("Getting job 
list...") - builders = jsonapi.get("%s/json/builders" % sURL, auth=creds) + s_url = source["sourceURL"] + kibble_bit.pprint("Getting job list...") + builders = jsonapi.get("%s/json/builders" % s_url, auth=creds) # Save queue snapshot NOW = int(datetime.datetime.utcnow().timestamp()) @@ -211,8 +211,8 @@ def scan(KibbleBit, source): # Scan queue items blocked = 0 stuck = 0 - queueSize = 0 - actualQueueSize = 0 + queue_size = 0 + actual_queue_size = 0 building = 0 jobs = [] @@ -222,10 +222,10 @@ def scan(KibbleBit, source): building += 1 if data.get("pendingBuilds", 0) > 0: # All queued items, even offline builders - actualQueueSize += data.get("pendingBuilds", 0) + actual_queue_size += data.get("pendingBuilds", 0) # Only queues with an online builder (actually waiting stuff) if data["state"] == "building": - queueSize += data.get("pendingBuilds", 0) + queue_size += data.get("pendingBuilds", 0) blocked += data.get("pendingBuilds", 0) # Blocked by running builds # Stuck builds (iow no builder available) if data["state"] == "offline": @@ -236,7 +236,7 @@ def scan(KibbleBit, source): "id": queuehash, "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(NOW)), "time": NOW, - "size": queueSize, + "size": queue_size, "blocked": blocked, "stuck": stuck, "building": building, @@ -246,15 +246,15 @@ def scan(KibbleBit, source): "organisation": source["organisation"], "upsert": True, } - KibbleBit.append("ci_queue", queuedoc) + kibble_bit.append("ci_queue", queuedoc) - KibbleBit.pprint("Found %u builders in Buildbot" % len(jobs)) + kibble_bit.pprint("Found %u builders in Buildbot" % len(jobs)) threads = [] block = threading.Lock() - KibbleBit.pprint("Scanning jobs using 4 sub-threads") + kibble_bit.pprint("Scanning jobs using 4 sub-threads") for i in range(0, 4): - t = buildbotThread(block, KibbleBit, source, creds, jobs) + t = BuildbotThread(block, kibble_bit, source, creds, jobs) threads.append(t) t.start() @@ -262,7 +262,7 @@ def scan(KibbleBit, source): t.join() # We're all done, yaay - KibbleBit.pprint("Done scanning %s" % source["sourceURL"]) + kibble_bit.pprint("Done scanning %s" % source["sourceURL"]) source["steps"]["ci"] = { "time": time.time(), @@ -271,4 +271,4 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/discourse.py b/kibble/scanners/scanners/discourse.py index ee99dde6..4e1d92f0 100644 --- a/kibble/scanners/scanners/discourse.py +++ b/kibble/scanners/scanners/discourse.py @@ -39,11 +39,11 @@ def accepts(source): return False -def scanJob(KibbleBit, source, cat, creds): +def scan_job(kibble_bit, source, cat, creds): """ Scans a single discourse category for activity """ # Get $discourseURL/c/$catID catURL = os.path.join(source["sourceURL"], "c/%s" % cat["id"]) - KibbleBit.pprint("Scanning Discourse category '%s' at %s" % (cat["slug"], catURL)) + kibble_bit.pprint("Scanning Discourse category '%s' at %s" % (cat["slug"], catURL)) page = 0 allUsers = {} @@ -91,8 +91,8 @@ def scanJob(KibbleBit, source, cat, creds): # Store it (or, queue storage) unless it exists. # We don't wanna override better data, so we check if # it's there first. 
- if not KibbleBit.exists("person", dhash): - KibbleBit.append("person", userDoc) + if not kibble_bit.exists("person", dhash): + kibble_bit.append("person", userDoc) # Now, for each topic, we'll store a topic document for topic in catjson["topic_list"]["topics"]: @@ -118,8 +118,8 @@ def scanJob(KibbleBit, source, cat, creds): # Determine whether we should scan this topic or continue to the next one. # We'll do this by seeing if the topic already exists and has no changes or not. - if KibbleBit.exists("forum_topic", dhash): - fdoc = KibbleBit.get("forum_topic", dhash) + if kibble_bit.exists("forum_topic", dhash): + fdoc = kibble_bit.get("forum_topic", dhash) # If update in the old doc was >= current update timestamp, skip the topic if fdoc["updated"] >= UpdatedDate: continue @@ -146,8 +146,8 @@ def scanJob(KibbleBit, source, cat, creds): + "/t/%s/%s" % (topic["slug"], topic["id"]), } - KibbleBit.append("forum_topic", topicdoc) - KibbleBit.pprint("%s is new or changed, scanning" % topicdoc["url"]) + kibble_bit.append("forum_topic", topicdoc) + kibble_bit.pprint("%s is new or changed, scanning" % topicdoc["url"]) # Now grab all the individual replies/posts # Remember to not have it count as a visit! @@ -157,7 +157,7 @@ def scanJob(KibbleBit, source, cat, creds): posts = pjson["post_stream"]["posts"] # For each post/reply, construct a forum_entry document - KibbleBit.pprint("%s has %u posts" % (pURL, len(posts))) + kibble_bit.pprint("%s has %u posts" % (pURL, len(posts))) for post in posts: phash = hashlib.sha224( ( @@ -199,7 +199,7 @@ def scanJob(KibbleBit, source, cat, creds): allUsers[user["id"]] = userDoc # Store it (or, queue storage) - KibbleBit.append("person", userDoc) + kibble_bit.append("person", userDoc) # Get post date CreatedDate = datetime.datetime.strptime( @@ -222,18 +222,18 @@ def scanJob(KibbleBit, source, cat, creds): "text": post["cooked"], "url": topicdoc["url"], } - KibbleBit.append("forum_post", pdoc) + kibble_bit.append("forum_post", pdoc) else: - KibbleBit.pprint("Fetching discourse data failed!") + kibble_bit.pprint("Fetching discourse data failed!") return False return True -class discourseThread(threading.Thread): +class DiscourseThread(threading.Thread): """ Generic thread class for scheduling multiple scans at once """ def __init__(self, block, KibbleBit, source, creds, jobs): - super(discourseThread, self).__init__() + super(DiscourseThread, self).__init__() self.block = block self.KibbleBit = KibbleBit self.creds = creds @@ -241,8 +241,8 @@ def __init__(self, block, KibbleBit, source, creds, jobs): self.jobs = jobs def run(self): - badOnes = 0 - while len(self.jobs) > 0 and badOnes <= 50: + bad_ones = 0 + while len(self.jobs) > 0 and bad_ones <= 50: self.block.acquire() try: job = self.jobs.pop(0) @@ -253,12 +253,12 @@ def run(self): self.block.release() return self.block.release() - if not scanJob(self.KibbleBit, self.source, job, self.creds): + if not scan_job(self.KibbleBit, self.source, job, self.creds): self.KibbleBit.pprint( "[%s] This borked, trying another one" % job["name"] ) - badOnes += 1 - if badOnes > 10: + bad_ones += 1 + if bad_ones > 10: self.KibbleBit.pprint("Too many errors, bailing!") self.source["steps"]["forum"] = { "time": time.time(), @@ -267,13 +267,13 @@ def run(self): "running": False, "good": False, } - self.KibbleBit.updateSource(self.source) + self.KibbleBit.update_source(self.source) return else: - badOnes = 0 + bad_ones = 0 -def scan(KibbleBit, source): +def scan(kibble_bit, source): # Simple URL check discourse = 
re.match(r"(https?://.+)", source["sourceURL"]) if discourse: @@ -284,17 +284,17 @@ def scan(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) pendingJobs = [] - KibbleBit.pprint("Parsing Discourse activity at %s" % source["sourceURL"]) + kibble_bit.pprint("Parsing Discourse activity at %s" % source["sourceURL"]) source["steps"]["forum"] = { "time": time.time(), "status": "Downloading changeset", "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) # Discourse may neeed credentials (if basic auth) creds = None @@ -308,20 +308,20 @@ def scan(KibbleBit, source): # Get the list of categories sURL = source["sourceURL"] - KibbleBit.pprint("Getting categories...") + kibble_bit.pprint("Getting categories...") catjs = jsonapi.get("%s/categories_and_latest" % sURL, auth=creds) # Directly assign the category list as pending jobs queue, ezpz. pendingJobs = catjs["category_list"]["categories"] - KibbleBit.pprint("Found %u categories" % len(pendingJobs)) + kibble_bit.pprint("Found %u categories" % len(pendingJobs)) # Now fire off 4 threads to parse the categories threads = [] block = threading.Lock() - KibbleBit.pprint("Scanning jobs using 4 sub-threads") + kibble_bit.pprint("Scanning jobs using 4 sub-threads") for i in range(0, 4): - t = discourseThread(block, KibbleBit, source, creds, pendingJobs) + t = DiscourseThread(block, kibble_bit, source, creds, pendingJobs) threads.append(t) t.start() @@ -329,7 +329,7 @@ def scan(KibbleBit, source): t.join() # We're all done, yaay - KibbleBit.pprint("Done scanning %s" % source["sourceURL"]) + kibble_bit.pprint("Done scanning %s" % source["sourceURL"]) source["steps"]["forum"] = { "time": time.time(), @@ -338,4 +338,4 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/gerrit.py b/kibble/scanners/scanners/gerrit.py index b82dab6a..fa60dffd 100644 --- a/kibble/scanners/scanners/gerrit.py +++ b/kibble/scanners/scanners/gerrit.py @@ -160,15 +160,15 @@ def make_person(repo, raw_person): } -def update_issue(KibbleBit, issue): +def update_issue(kibble_bit, issue): id = issue["id"] - KibbleBit.pprint("Updating issue: " + id) - KibbleBit.index("issue", id, issue) + kibble_bit.pprint("Updating issue: " + id) + kibble_bit.index("issue", id, issue) -def update_person(KibbleBit, person): - KibbleBit.pprint("Updating person: " + person["name"] + " - " + person["email"]) - KibbleBit.index("person", person["id"], {"doc": person, "doc_as_upsert": True}) +def update_person(kibble_bit, person): + kibble_bit.pprint("Updating person: " + person["name"] + " - " + person["email"]) + kibble_bit.index("person", person["id"], {"doc": person, "doc_as_upsert": True}) def status_changed(stored_change, change): @@ -177,14 +177,14 @@ def status_changed(stored_change, change): return stored_change["status"] != change["status"] -def scan(KibbleBit, source): +def scan(kibble_bit, source): source["steps"]["issues"] = { "time": time.time(), "status": "Analyzing Gerrit tickets...", "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) url = source["sourceURL"] # Try matching foo.bar/r/project/subfoo @@ -217,8 +217,8 @@ def scan(KibbleBit, source): dhash = make_hash(source, change) stored_change = None - if KibbleBit.exists("issue", dhash): - stored_change = KibbleBit.get("issue", dhash) + if 
kibble_bit.exists("issue", dhash): + stored_change = kibble_bit.get("issue", dhash) if not status_changed(stored_change, change): # print("change %s seen already and status unchanged. Skipping." % @@ -228,7 +228,7 @@ def scan(KibbleBit, source): details = change_details(base_url, change) issue_doc = make_issue(source, base_url, details) - update_issue(KibbleBit, issue_doc) + update_issue(kibble_bit, issue_doc) labels = details["labels"] change_people = [] @@ -247,7 +247,7 @@ def scan(KibbleBit, source): for person in change_people: if "email" in person and person["email"] not in people: people[person["email"]] = person - update_person(KibbleBit, make_person(source, person)) + update_person(kibble_bit, make_person(source, person)) except requests.HTTPError as e: print(e) @@ -258,4 +258,4 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/git-census.py b/kibble/scanners/scanners/git-census.py index aa19a037..790ae492 100644 --- a/kibble/scanners/scanners/git-census.py +++ b/kibble/scanners/scanners/git-census.py @@ -38,7 +38,7 @@ def accepts(source): return False -def scan(KibbleBit, source): +def scan(kibble_bit, source): """ Conduct a census scan """ people = {} idseries = {} @@ -50,7 +50,7 @@ def scan(KibbleBit, source): rid = source["sourceID"] url = source["sourceURL"] rootpath = "%s/%s/git" % ( - KibbleBit.config["scanner"]["scratchdir"], + kibble_bit.config["scanner"]["scratchdir"], source["organisation"], ) gpath = os.path.join(rootpath, rid) @@ -63,9 +63,8 @@ def scan(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) gname = rid - inp = "" modificationDates = {} # Did we do a census before? if "census" in source and source["census"] > 0: @@ -92,7 +91,7 @@ def scan(KibbleBit, source): inp = f.read() f.close() os.unlink(tmp.name) - KibbleBit.pprint("Parsing log for %s (%s)..." % (rid, url)) + kibble_bit.pprint("Parsing log for %s (%s)..." % (rid, url)) for m in re.finditer( u":([a-f0-9]+)\|([^\r\n|]+)\|([^\r\n|]+)\|([^\r\n|]+)\|([^\r\n|]+)\|([\d+]+)\r?\n([^:]+?):", inp, @@ -145,7 +144,7 @@ def scan(KibbleBit, source): if delete > 100000000: delete = 0 if delete > 1000000 or insert > 1000000: - KibbleBit.pprint( + kibble_bit.pprint( "gigantic diff for %s (%s), ignoring" % (gpath, source["sourceURL"]) ) @@ -250,7 +249,7 @@ def scan(KibbleBit, source): "vcs": "git", "files_changed": filelist, } - KibbleBit.append( + kibble_bit.append( "person", { "upsert": True, @@ -265,7 +264,7 @@ def scan(KibbleBit, source): ).hexdigest(), }, ) - KibbleBit.append( + kibble_bit.append( "person", { "upsert": True, @@ -280,11 +279,11 @@ def scan(KibbleBit, source): ).hexdigest(), }, ) - KibbleBit.append("code_commit", js) - KibbleBit.append("code_commit_unique", jsx) + kibble_bit.append("code_commit", js) + kibble_bit.append("code_commit_unique", jsx) if True: # Do file changes?? 
Might wanna make this optional - KibbleBit.pprint("Scanning file changes for %s" % source["sourceURL"]) + kibble_bit.pprint("Scanning file changes for %s" % source["sourceURL"]) for filename in modificationDates: fid = hashlib.sha1( ("%s/%s" % (source["sourceID"], filename)).encode( @@ -310,11 +309,11 @@ def scan(KibbleBit, source): time.gmtime(modificationDates[filename]["created"]), ), } - found = KibbleBit.exists("file_history", fid) + found = kibble_bit.exists("file_history", fid) if found: del jsfe["created"] del jsfe["createdDate"] - KibbleBit.append("file_history", jsfe) + kibble_bit.append("file_history", jsfe) source["steps"]["census"] = { "time": time.time(), @@ -324,4 +323,4 @@ def scan(KibbleBit, source): "good": True, } source["census"] = time.time() - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/git-evolution.py b/kibble/scanners/scanners/git-evolution.py index 07a7ada2..b533b4bb 100644 --- a/kibble/scanners/scanners/git-evolution.py +++ b/kibble/scanners/scanners/git-evolution.py @@ -46,12 +46,12 @@ def get_first_ref(gpath): % gpath, shell=True, ) - except: + except: # pylint: disable=bare-except print("Could not get first ref, exiting!") return None -def acquire(KibbleBit, source): +def acquire(kibble_bit, source): source["steps"]["evolution"] = { "time": time.time(), "status": "Evolution scan started at " @@ -59,10 +59,10 @@ def acquire(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) -def release(KibbleBit, source, status, exception=None, good=False): +def release(kibble_bit, source, status, exception=None, good=False): source["steps"]["evolution"] = { "time": time.time(), "status": status, @@ -72,7 +72,7 @@ def release(KibbleBit, source, status, exception=None, good=False): if exception: source["steps"]["evolution"].update({"exception": exception}) - KibbleBit.updateSource(source) + kibble_bit.update_source(source) def check_branch(gpath, date, branch): @@ -82,7 +82,7 @@ def check_branch(gpath, date, branch): shell=True, ) return True - except: + except: # pylint: disable=bare-except return False @@ -116,7 +116,7 @@ def find_branch(date, gpath): stderr=subprocess.DEVNULL, ) return "master" - except: + except: # pylint: disable=bare-except os.chdir(gpath) try: return ( @@ -129,22 +129,22 @@ def find_branch(date, gpath): .strip() .strip("* ") ) - except: + except: # pylint: disable=bare-except # print("meh! 
no branch") return None -def scan(KibbleBit, source): +def scan(kibble_bit, source): rid = source["sourceID"] rootpath = "%s/%s/git" % ( - KibbleBit.config["scanner"]["scratchdir"], + kibble_bit.config["scanner"]["scratchdir"], source["organisation"], ) gpath = os.path.join(rootpath, rid) gname = source["sourceID"] - KibbleBit.pprint("Doing evolution scan of %s" % gname) + kibble_bit.pprint("Doing evolution scan of %s" % gname) inp = get_first_ref(gpath) if inp: @@ -158,13 +158,13 @@ def scan(KibbleBit, source): rid = source["sourceID"] url = source["sourceURL"] rootpath = "%s/%s/git" % ( - KibbleBit.config["scanner"]["scratchdir"], + kibble_bit.config["scanner"]["scratchdir"], source["organisation"], ) gpath = os.path.join(rootpath, rid) if source["steps"]["sync"]["good"] and os.path.exists(gpath): - acquire(KibbleBit, source) + acquire(kibble_bit, source) branch = find_branch(date, gpath) if not branch: @@ -178,7 +178,7 @@ def scan(KibbleBit, source): branch_exists = check_branch(gpath, date, branch) if not branch_exists: - KibbleBit.pprint("Not trunk either (bad repo?), skipping") + kibble_bit.pprint("Not trunk either (bad repo?), skipping") release( source, "Could not do evolutionary scan of code", @@ -207,10 +207,10 @@ def scan(KibbleBit, source): dhash = hashlib.sha224( (source["sourceID"] + date).encode("ascii", "replace") ).hexdigest() - found = KibbleBit.exists("evolution", dhash) + found = kibble_bit.exists("evolution", dhash) if not found: checkout(gpath, date, branch) - KibbleBit.pprint( + kibble_bit.pprint( "Running cloc on %s (%s) at %s" % (gname, source["sourceURL"], date) ) @@ -229,7 +229,7 @@ def scan(KibbleBit, source): "cost": cost, "languages": languages, } - KibbleBit.index("evolution", dhash, js) + kibble_bit.index("evolution", dhash, js) quarter -= 3 if quarter <= 0: quarter += 12 @@ -238,9 +238,9 @@ def scan(KibbleBit, source): # decrease month by 3 now = time.mktime(datetime.date(year, quarter, 1).timetuple()) except Exception as e: - KibbleBit.pprint(e) + kibble_bit.pprint(e) release( - KibbleBit, + kibble_bit, source, "Evolution scan failed at " + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), @@ -249,7 +249,7 @@ def scan(KibbleBit, source): return release( - KibbleBit, + kibble_bit, source, "Evolution scan completed at " + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), diff --git a/kibble/scanners/scanners/git-sloc.py b/kibble/scanners/scanners/git-sloc.py index d9633577..8b44c543 100644 --- a/kibble/scanners/scanners/git-sloc.py +++ b/kibble/scanners/scanners/git-sloc.py @@ -37,12 +37,12 @@ def accepts(source): return False -def scan(KibbleBit, source): +def scan(kibble_bit, source): rid = source["sourceID"] url = source["sourceURL"] rootpath = "%s/%s/git" % ( - KibbleBit.config["scanner"]["scratchdir"], + kibble_bit.config["scanner"]["scratchdir"], source["organisation"], ) gpath = os.path.join(rootpath, rid) @@ -55,16 +55,16 @@ def scan(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) try: branch = git.defaultBranch(source, gpath) subprocess.call("cd %s && git checkout %s" % (gpath, branch), shell=True) - except: - KibbleBit.pprint("SLoC counter failed to find main branch for %s!!" % url) + except: # pylint: disable=bare-except + kibble_bit.pprint("SLoC counter failed to find main branch for %s!!" 
% url) return False - KibbleBit.pprint("Running SLoC count for %s" % url) + kibble_bit.pprint("Running SLoC count for %s" % url) languages, codecount, comment, blank, years, cost = sloc.count(gpath) sloc_ = { @@ -84,4 +84,4 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/git-sync.py b/kibble/scanners/scanners/git-sync.py index e2e9422b..c1c40f6d 100644 --- a/kibble/scanners/scanners/git-sync.py +++ b/kibble/scanners/scanners/git-sync.py @@ -30,18 +30,18 @@ def accepts(source): if source["type"] == "git": return True # There are cases where we have a github repo, but don't wanna analyze the code, just issues - if source["type"] == "github" and source.get("issuesonly", False) == False: + if source["type"] == "github" and source.get("issuesonly", False) is False: return True return False -def scan(KibbleBit, source): +def scan(kibble_bit, source): # Get some vars, construct a data path for the repo path = source["sourceID"] url = source["sourceURL"] rootpath = "%s/%s/git" % ( - KibbleBit.config["scanner"]["scratchdir"], + kibble_bit.config["scanner"]["scratchdir"], source["organisation"], ) @@ -50,20 +50,20 @@ def scan(KibbleBit, source): try: os.makedirs(rootpath, exist_ok=True) print("Created root path %s" % rootpath) - except Exception as err: + except: # pylint: disable=bare-except source["steps"]["sync"] = { "time": time.time(), "status": "Could not create root scratch dir - permision denied?", "running": False, "good": False, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) return # This is were the repo should be cloned datapath = os.path.join(rootpath, path) - KibbleBit.pprint("Checking out %s as %s" % (url, path)) + kibble_bit.pprint("Checking out %s as %s" % (url, path)) try: source["steps"]["sync"] = { @@ -72,14 +72,14 @@ def scan(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) # If we already checked this out earlier, just sync it. if os.path.exists(datapath): - KibbleBit.pprint("Repo %s exists, fetching changes..." % datapath) + kibble_bit.pprint("Repo %s exists, fetching changes..." % datapath) # Do we have a default branch here? - branch = git.defaultBranch(source, datapath, KibbleBit) + branch = git.defaultBranch(source, datapath, kibble_bit) if len(branch) == 0: source["default_branch"] = branch source["steps"]["sync"] = { @@ -89,14 +89,14 @@ def scan(KibbleBit, source): "running": False, "good": False, } - KibbleBit.updateSource(source) - KibbleBit.pprint( + kibble_bit.update_source(source) + kibble_bit.pprint( "No default branch found for %s (%s)" % (source["sourceID"], source["sourceURL"]) ) return - KibbleBit.pprint("Using branch %s" % branch) + kibble_bit.pprint("Using branch %s" % branch) # Try twice checking out the main branch and fetching changes. # Sometimes we need to clean up after older scanners, which is # why we try twice. If first attempt fails, clean up and try again. @@ -137,11 +137,11 @@ def scan(KibbleBit, source): shell=True, stderr=subprocess.STDOUT, ) - except: + except: # pylint: disable=bare-except pass # This is a new repo, clone it! else: - KibbleBit.pprint("%s is new, cloning...!" % datapath) + kibble_bit.pprint("%s is new, cloning...!" 
% datapath) subprocess.check_output( "GIT_TERMINAL_PROMPT=0 cd %s && git clone %s %s" % (rootpath, url, path), @@ -150,8 +150,8 @@ def scan(KibbleBit, source): ) except subprocess.CalledProcessError as err: - KibbleBit.pprint("Repository sync failed (no master?)") - KibbleBit.pprint(str(err.output)) + kibble_bit.pprint("Repository sync failed (no master?)") + kibble_bit.pprint(str(err.output)) source["steps"]["sync"] = { "time": time.time(), "status": "Sync failed at " @@ -160,7 +160,7 @@ def scan(KibbleBit, source): "good": False, "exception": str(err.output), } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) return # All good, yay! @@ -171,4 +171,4 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/github-issues.py b/kibble/scanners/scanners/github-issues.py index 06bbde67..cdfcde1e 100644 --- a/kibble/scanners/scanners/github-issues.py +++ b/kibble/scanners/scanners/github-issues.py @@ -124,29 +124,29 @@ def status_changed(stored_issue, issue): return stored_issue["status"] != issue["status"] -def update_issue(KibbleBit, issue): - KibbleBit.append("issue", issue) +def update_issue(kibble_bit, issue): + kibble_bit.append("issue", issue) -def update_person(KibbleBit, person): +def update_person(kibble_bit, person): person["upsert"] = True - KibbleBit.append("person", person) + kibble_bit.append("person", person) -def scan(KibbleBit, source, firstAttempt=True): +def scan(kibble_bit, source, first_attempt=True): auth = None people = {} if "creds" in source: - KibbleBit.pprint("Using auth for repo %s" % source["sourceURL"]) + kibble_bit.pprint("Using auth for repo %s" % source["sourceURL"]) creds = source["creds"] if creds and "username" in creds: auth = (creds["username"], creds["password"]) TL = github.get_tokens_left(auth=auth) - KibbleBit.pprint("Scanning for GitHub issues (%u tokens left on GitHub)" % TL) + kibble_bit.pprint("Scanning for GitHub issues (%u tokens left on GitHub)" % TL) # Have we scanned before? If so, only do a 3 month scan here. 
- doneBefore = False + done_before = False if source.get("steps") and source["steps"].get("issues"): - doneBefore = True + done_before = True source["steps"]["issues"] = { "time": time.time(), "status": "Issue scan started at " @@ -154,13 +154,13 @@ def scan(KibbleBit, source, firstAttempt=True): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) try: - if doneBefore: + if done_before: since = time.strftime( "%Y-%m-%dT%H:%M:%SZ", time.gmtime(time.time() - (3 * 30 * 86400)) ) - KibbleBit.pprint("Fetching changes since %s" % since) + kibble_bit.pprint("Fetching changes since %s" % since) issues = github.get_all( source, github.issues, @@ -174,7 +174,7 @@ def scan(KibbleBit, source, firstAttempt=True): params={"filter": "all", "state": "all"}, auth=auth, ) - KibbleBit.pprint( + kibble_bit.pprint( "Fetched %s issues for %s" % (str(len(issues)), source["sourceURL"]) ) @@ -185,24 +185,24 @@ def scan(KibbleBit, source, firstAttempt=True): source, issue, github.user(issue["user"]["url"], auth=auth) ) people[issue["user"]["login"]] = person - update_person(KibbleBit, person) + update_person(kibble_bit, person) if "closed_by" in issue and not issue["closed_by"]["login"] in people: closer = make_person( source, issue, github.user(issue["closed_by"]["url"], auth=auth) ) people[issue["closed_by"]["login"]] = closer - update_person(KibbleBit, closer) + update_person(kibble_bit, closer) doc = make_issue(source, issue, people) dhash = doc["id"] - if KibbleBit.exists("issue", dhash): - es_doc = KibbleBit.get("issue", dhash) + if kibble_bit.exists("issue", dhash): + es_doc = kibble_bit.get("issue", dhash) if not status_changed(es_doc, doc): # KibbleBit.pprint("change %s seen already and status unchanged. Skipping." % issue['id']) continue - update_issue(KibbleBit, doc) + update_issue(kibble_bit, doc) source["steps"]["issues"] = { "time": time.time(), @@ -211,28 +211,30 @@ def scan(KibbleBit, source, firstAttempt=True): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) except requests.HTTPError as e: # If we errored out because of rate limiting, retry later, otherwise bail - if firstAttempt: + if first_attempt: sleeps = 0 if github.get_tokens_left(auth=auth) < 10: - KibbleBit.pprint("Hit rate limits, trying to sleep it off!") + kibble_bit.pprint("Hit rate limits, trying to sleep it off!") while github.get_tokens_left(auth=auth) < 10: sleeps += 1 if sleeps > 24: - KibbleBit.pprint( + kibble_bit.pprint( "Slept for too long without finding a reset rate limit, giving up!" ) break time.sleep(300) # Sleep 5 min, then check again.. # If we have tokens, try one more time... 
if github.get_tokens_left(auth=auth) > 10: - scan(KibbleBit, source, False) # If this one fails, bail completely + scan( + kibble_bit, source, False + ) # If this one fails, bail completely return - KibbleBit.pprint("HTTP Error, rate limit exceeded?") + kibble_bit.pprint("HTTP Error, rate limit exceeded?") source["steps"]["issues"] = { "time": time.time(), "status": "Issue scan failed at " @@ -242,4 +244,4 @@ def scan(KibbleBit, source, firstAttempt=True): "running": False, "good": False, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/github-stats.py b/kibble/scanners/scanners/github-stats.py index 504d1516..ed4eda14 100644 --- a/kibble/scanners/scanners/github-stats.py +++ b/kibble/scanners/scanners/github-stats.py @@ -32,26 +32,26 @@ def accepts(source): return False -def getTime(string): +def get_time(string): """ Convert GitHub timestamp to epoch """ return time.mktime( time.strptime(re.sub(r"Z", "", str(string)), "%Y-%m-%dT%H:%M:%S") ) -def scan(KibbleBit, source): +def scan(kibble_bit, source): # Get some vars, construct a data path for the repo url = source["sourceURL"] auth = None if "creds" in source: - KibbleBit.pprint("Using auth for repo %s" % source["sourceURL"]) + kibble_bit.pprint("Using auth for repo %s" % source["sourceURL"]) creds = source["creds"] if creds and "username" in creds: auth = (creds["username"], creds["password"]) else: - KibbleBit.pprint( + kibble_bit.pprint( "GitHub stats requires auth, none provided. Ignoring this repo." ) return @@ -62,13 +62,13 @@ def scan(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) # Get views views = github.views(url, auth) if "views" in views: for el in views["views"]: - ts = getTime(el["timestamp"]) + ts = get_time(el["timestamp"]) shash = hashlib.sha224( ( "%s-%s-%s-clones" @@ -85,13 +85,13 @@ def scan(KibbleBit, source): "ghtype": "views", "id": shash, } - KibbleBit.append("ghstats", bit) + kibble_bit.append("ghstats", bit) # Get clones clones = github.clones(url, auth) if "clones" in clones: for el in clones["clones"]: - ts = getTime(el["timestamp"]) + ts = get_time(el["timestamp"]) shash = hashlib.sha224( ( "%s-%s-%s-clones" @@ -108,14 +108,14 @@ def scan(KibbleBit, source): "ghtype": "clones", "id": shash, } - KibbleBit.append("ghstats", bit) + kibble_bit.append("ghstats", bit) # Get referrers refs = github.referrers(url, auth) if refs: for el in refs: el["timestamp"] = time.strftime("%Y-%m-%dT%H:%M:%S", time.time()) - ts = getTime(el["timestamp"]) + ts = get_time(el["timestamp"]) shash = hashlib.sha224( ( "%s-%s-%s-refs" % (source["organisation"], url, el["timestamp"]) @@ -131,7 +131,7 @@ def scan(KibbleBit, source): "ghtype": "referrers", "id": shash, } - KibbleBit.append("ghstats", bit) - except: + kibble_bit.append("ghstats", bit) + except: # pylint: disable=bare-except pass # All done! 
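Two small helpers carry the github-stats changes above: converting GitHub's ISO-8601 timestamps to epoch seconds, and deriving a deterministic sha224 document id so re-scanning the same day upserts rather than duplicates (incidentally, the views branch above reuses the "-clones" suffix when hashing; the sketch below parameterises the stat type instead). A runnable sketch, with placeholder organisation and repo values:

    import hashlib
    import re
    import time

    def get_time(stamp):
        # Strip the trailing Z and parse, mirroring the scanner's get_time().
        # Like the original, time.mktime interprets the result in local time.
        return time.mktime(
            time.strptime(re.sub(r"Z", "", str(stamp)), "%Y-%m-%dT%H:%M:%S")
        )

    def stats_doc_id(org, url, ts, kind):
        # Same org-url-timestamp shape as the shash fields above, so one
        # (repo, day, stat type) combination always maps to one ES document.
        raw = "%s-%s-%s-%s" % (org, url, ts, kind)
        return hashlib.sha224(raw.encode("ascii", errors="replace")).hexdigest()

    ts = get_time("2020-12-13T00:00:00Z")
    print(stats_doc_id("example-org", "https://github.com/example/repo", ts, "views"))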
diff --git a/kibble/scanners/scanners/jenkins.py b/kibble/scanners/scanners/jenkins.py index f7ec495e..3db31ac2 100644 --- a/kibble/scanners/scanners/jenkins.py +++ b/kibble/scanners/scanners/jenkins.py @@ -39,7 +39,7 @@ def accepts(source): return False -def scanJob(KibbleBit, source, job, creds): +def scan_job(kibble_bit, source, job, creds): """ Scans a single job for activity """ NOW = int(datetime.datetime.utcnow().timestamp()) jname = job["name"] @@ -50,16 +50,14 @@ def scanJob(KibbleBit, source, job, creds): "ascii", errors="replace" ) ).hexdigest() - doc = None - found = KibbleBit.exists("cijob", dhash) # Get $jenkins/job/$job-name/json... - jobURL = ( + job_url = ( "%s/api/json?depth=2&tree=builds[number,status,timestamp,id,result,duration]" % job["fullURL"] ) - KibbleBit.pprint(jobURL) - jobjson = jsonapi.get(jobURL, auth=creds) + kibble_bit.pprint(job_url) + jobjson = jsonapi.get(job_url, auth=creds) # If valid JSON, ... if jobjson: @@ -72,15 +70,15 @@ def scanJob(KibbleBit, source, job, creds): ).hexdigest() builddoc = None try: - builddoc = KibbleBit.get("ci_build", buildhash) - except: + builddoc = kibble_bit.get("ci_build", buildhash) + except: # pylint: disable=bare-except pass # If this build already completed, no need to parse it again if builddoc and builddoc.get("completed", False): continue - KibbleBit.pprint( + kibble_bit.pprint( "[%s-%s] This is new or pending, analyzing..." % (jname, build["id"]) ) @@ -117,7 +115,7 @@ def scanJob(KibbleBit, source, job, creds): "completed": completed, "duration": build["duration"], "job": jname, - "jobURL": jobURL, + "job_url": job_url, "status": status, "started": int(build["timestamp"] / 1000), "ci": "jenkins", @@ -127,20 +125,20 @@ def scanJob(KibbleBit, source, job, creds): "organisation": source["organisation"], "upsert": True, } - KibbleBit.append("ci_build", doc) + kibble_bit.append("ci_build", doc) # Yay, it worked! return True # Boo, it failed! 
- KibbleBit.pprint("Fetching job data failed!") + kibble_bit.pprint("Fetching job data failed!") return False -class jenkinsThread(threading.Thread): +class Jenkinsthread(threading.Thread): """ Generic thread class for scheduling multiple scans at once """ def __init__(self, block, KibbleBit, source, creds, jobs): - super(jenkinsThread, self).__init__() + super(Jenkinsthread, self).__init__() self.block = block self.KibbleBit = KibbleBit self.creds = creds @@ -148,8 +146,8 @@ def __init__(self, block, KibbleBit, source, creds, jobs): self.jobs = jobs def run(self): - badOnes = 0 - while len(self.jobs) > 0 and badOnes <= 50: + bad_ones = 0 + while len(self.jobs) > 0 and bad_ones <= 50: self.block.acquire() try: job = self.jobs.pop(0) @@ -165,12 +163,12 @@ def run(self): ssource = dict(self.source) if jfolder: ssource["sourceURL"] += "/job/" + jfolder - if not scanJob(self.KibbleBit, ssource, job, self.creds): + if not scan_job(self.KibbleBit, ssource, job, self.creds): self.KibbleBit.pprint( "[%s] This borked, trying another one" % job["name"] ) - badOnes += 1 - if badOnes > 100: + bad_ones += 1 + if bad_ones > 100: self.KibbleBit.pprint("Too many errors, bailing!") self.source["steps"]["issues"] = { "time": time.time(), @@ -179,13 +177,13 @@ def run(self): "running": False, "good": False, } - self.KibbleBit.updateSource(self.source) + self.KibbleBit.update_source(self.source) return else: - badOnes = 0 + bad_ones = 0 -def scan(KibbleBit, source): +def scan(kibble_bit, source): # Simple URL check jenkins = re.match(r"(https?://.+)", source["sourceURL"]) if jenkins: @@ -196,17 +194,16 @@ def scan(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) - pendingJobs = [] - KibbleBit.pprint("Parsing Jenkins activity at %s" % source["sourceURL"]) + kibble_bit.pprint("Parsing Jenkins activity at %s" % source["sourceURL"]) source["steps"]["issues"] = { "time": time.time(), "status": "Downloading changeset", "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) # Jenkins may neeed credentials creds = None @@ -219,15 +216,15 @@ def scan(KibbleBit, source): creds = "%s:%s" % (source["creds"]["username"], source["creds"]["password"]) # Get the job list - sURL = source["sourceURL"] - KibbleBit.pprint("Getting job list...") + s_url = source["sourceURL"] + kibble_bit.pprint("Getting job list...") jobsjs = jsonapi.get( - "%s/api/json?tree=jobs[name,color]&depth=1" % sURL, auth=creds + "%s/api/json?tree=jobs[name,color]&depth=1" % s_url, auth=creds ) # Get the current queue - KibbleBit.pprint("Getting job queue...") - queuejs = jsonapi.get("%s/queue/api/json?depth=1" % sURL, auth=creds) + kibble_bit.pprint("Getting job queue...") + queuejs = jsonapi.get("%s/queue/api/json?depth=1" % s_url, auth=creds) # Save queue snapshot NOW = int(datetime.datetime.utcnow().timestamp()) @@ -256,7 +253,7 @@ def scan(KibbleBit, source): # Count how many jobs are building, find any folders... 
actual_jobs, building = get_all_jobs( - KibbleBit, source, jobsjs.get("jobs", []), creds + kibble_bit, source, jobsjs.get("jobs", []), creds ) # Write up a queue doc @@ -275,16 +272,16 @@ def scan(KibbleBit, source): "organisation": source["organisation"], "upsert": True, } - KibbleBit.append("ci_queue", queuedoc) + kibble_bit.append("ci_queue", queuedoc) - pendingJobs = actual_jobs - KibbleBit.pprint("Found %u jobs in Jenkins" % len(pendingJobs)) + pending_jobs = actual_jobs + kibble_bit.pprint("Found %u jobs in Jenkins" % len(pending_jobs)) threads = [] block = threading.Lock() - KibbleBit.pprint("Scanning jobs using 4 sub-threads") + kibble_bit.pprint("Scanning jobs using 4 sub-threads") for i in range(0, 4): - t = jenkinsThread(block, KibbleBit, source, creds, pendingJobs) + t = JenkinsThread(block, kibble_bit, source, creds, pending_jobs) threads.append(t) t.start() @@ -292,7 +289,7 @@ def scan(KibbleBit, source): t.join() # We're all done, yaay - KibbleBit.pprint("Done scanning %s" % source["sourceURL"]) + kibble_bit.pprint("Done scanning %s" % source["sourceURL"]) source["steps"]["issues"] = { "time": time.time(), @@ -301,10 +298,10 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) -def get_all_jobs(KibbleBit, source, joblist, creds): +def get_all_jobs(kibble_bit, source, joblist, creds): real_jobs = [] building = 0 for job in joblist: @@ -314,30 +311,30 @@ def get_all_jobs(KibbleBit, source, joblist, creds): "jenkins.branch.OrganizationFolder", "org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject", ]: - KibbleBit.pprint("%s is a jobs folder, expanding..." % job["name"]) - csURL = "%s/job/%s" % ( + kibble_bit.pprint("%s is a jobs folder, expanding..." % job["name"]) + cs_url = "%s/job/%s" % ( source["sourceURL"], urllib.parse.quote(job["name"].replace("/", "%2F")), ) try: child_jobs = jsonapi.get( - "%s/api/json?tree=jobs[name,color]&depth=1" % csURL, auth=creds + "%s/api/json?tree=jobs[name,color]&depth=1" % cs_url, auth=creds ) csource = dict(source) - csource["sourceURL"] = csURL + csource["sourceURL"] = cs_url if not csource.get("folder"): csource["folder"] = job["name"] else: csource["folder"] += "-" + job["name"] cjobs, cbuilding = get_all_jobs( - KibbleBit, csource, child_jobs.get("jobs", []), creds + kibble_bit, csource, child_jobs.get("jobs", []), creds ) building += cbuilding for cjob in cjobs: real_jobs.append(cjob) - except: - KibbleBit.pprint("Couldn't get child jobs, bailing") - print("%s/api/json?tree=jobs[name,color]&depth=1" % csURL) + except: # pylint: disable=bare-except + kibble_bit.pprint("Couldn't get child jobs, bailing") + print("%s/api/json?tree=jobs[name,color]&depth=1" % cs_url) # Or standard job? else: # Is it building? 
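The renamed scan_job/JenkinsThread pair above follows the same worker pattern the JIRA and Travis scanners use below: four threads pop jobs off one shared list under a lock and bail out after a streak of failures. A minimal, self-contained sketch of that pattern, with hypothetical names (ScannerThread, scan_one) standing in for the scanner-specific pieces:

    import threading

    class ScannerThread(threading.Thread):
        """Hypothetical stand-in for the JenkinsThread/JiraThread/TravisThread classes."""

        def __init__(self, block, jobs, scan_one):
            super().__init__()
            self.block = block        # lock guarding the shared job list
            self.jobs = jobs          # one list shared by all workers
            self.scan_one = scan_one  # callable that scans a single job
            self.bad_ones = 0         # consecutive-failure counter

        def run(self):
            while self.jobs and self.bad_ones <= 50:
                # Pop under the lock so no two workers grab the same job.
                self.block.acquire()
                try:
                    job = self.jobs.pop(0)
                except IndexError:
                    self.block.release()
                    return
                self.block.release()
                if self.scan_one(job):
                    self.bad_ones = 0  # a success resets the error streak
                else:
                    self.bad_ones += 1

    def scan_all(jobs, scan_one, workers=4):
        block = threading.Lock()
        threads = [ScannerThread(block, jobs, scan_one) for _ in range(workers)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    if __name__ == "__main__":
        scan_all(list(range(10)), lambda job: job % 3 != 0)

A plain threading.Lock suffices here because list.pop(0) is the only shared mutation; the real classes additionally mark the source document as failed in Elasticsearch once the error streak passes the threshold.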
diff --git a/kibble/scanners/scanners/jira.py b/kibble/scanners/scanners/jira.py index 41dafa6b..14789c9d 100644 --- a/kibble/scanners/scanners/jira.py +++ b/kibble/scanners/scanners/jira.py @@ -43,7 +43,7 @@ def accepts(source): return False -def getTime(string): +def get_time(string): return time.mktime( time.strptime(re.sub(r"\..*", "", str(string)), "%Y-%m-%dT%H:%M:%S") ) @@ -121,7 +121,7 @@ def pchange(js): return False -def scanTicket(KibbleBit, key, u, source, creds, openTickets): +def scan_ticket(kibble_bit, key, u, source, creds, open_tickets): """ Scans a single ticket for activity and people """ dhash = hashlib.sha224( @@ -129,8 +129,7 @@ def scanTicket(KibbleBit, key, u, source, creds, openTickets): "ascii", errors="replace" ) ).hexdigest() - found = True - parseIt = False + parse_it = False # the 'domain' var we try to figure out here is used # for faking email addresses and keep them unique, @@ -140,62 +139,62 @@ def scanTicket(KibbleBit, key, u, source, creds, openTickets): if m: domain = m.group(1) - found = KibbleBit.exists("issue", dhash) + found = kibble_bit.exists("issue", dhash) if not found: - KibbleBit.pprint("[%s] We've never seen this ticket before, parsing..." % key) - parseIt = True + kibble_bit.pprint("[%s] We've never seen this ticket before, parsing..." % key) + parse_it = True else: - ticket = KibbleBit.get("issue", dhash) - if ticket["status"] == "closed" and key in openTickets: - KibbleBit.pprint("[%s] Ticket was reopened, reparsing" % key) - parseIt = True - elif ticket["status"] == "open" and not key in openTickets: - KibbleBit.pprint("[%s] Ticket was recently closed, parsing it" % key) - parseIt = True + ticket = kibble_bit.get("issue", dhash) + if ticket["status"] == "closed" and key in open_tickets: + kibble_bit.pprint("[%s] Ticket was reopened, reparsing" % key) + parse_it = True + elif ticket["status"] == "open" and not key in open_tickets: + kibble_bit.pprint("[%s] Ticket was recently closed, parsing it" % key) + parse_it = True else: if ( ticket["issueCreator"] == "unknown@kibble" or ticket["issueCloser"] == "unknown@kibble" ): # Gotta redo these! - parseIt = True - KibbleBit.pprint( + parse_it = True + kibble_bit.pprint( "[%s] Ticket contains erroneous data from a previous scan, reparsing" % key ) # This is just noise! # KibbleBit.pprint("[%s] Ticket hasn't changed, ignoring..." % key) - if parseIt: - KibbleBit.pprint("[%s] Parsing data from JIRA at %s..." % (key, domain)) - queryURL = ( + if parse_it: + kibble_bit.pprint("[%s] Parsing data from JIRA at %s..." 
% (key, domain)) + query_url = ( "%s/rest/api/2/issue/%s?fields=creator,reporter,status,issuetype,summary,assignee,resolutiondate,created,priority,changelog,comment,resolution,votes&expand=changelog" % (u, key) ) - jiraURL = "%s/browse/%s" % (u, key) + jira_url = "%s/browse/%s" % (u, key) try: - tjson = jsonapi.get(queryURL, auth=creds) + tjson = jsonapi.get(query_url, auth=creds) if not tjson: - KibbleBit.pprint("%s does not exist (404'ed)" % key) + kibble_bit.pprint("%s does not exist (404'ed)" % key) return False except requests.exceptions.ConnectionError as err: - KibbleBit.pprint(f"Connection error: {err}, skipping this ticket for now!") + kibble_bit.pprint(f"Connection error: {err}, skipping this ticket for now!") return False st, closer = wasclosed(tjson) if st and not closer: - KibbleBit.pprint("Closed but no closer??") - closerEmail = None + kibble_bit.pprint("Closed but no closer??") + closer_email = None status = "closed" if st else "open" # Make sure we actually have field data to work with if not tjson.get("fields") or not tjson["fields"].get("created"): - KibbleBit.pprint( + kibble_bit.pprint( "[%s] JIRA response is missing field data, ignoring ticket." % key ) return False - cd = getTime(tjson["fields"]["created"]) + cd = get_time(tjson["fields"]["created"]) rd = ( - getTime(tjson["fields"]["resolutiondate"]) + get_time(tjson["fields"]["resolutiondate"]) if "resolutiondate" in tjson["fields"] and tjson["fields"]["resolutiondate"] else None ) @@ -221,40 +220,40 @@ def scanTicket(KibbleBit, key, u, source, creds, openTickets): title = tjson["fields"]["summary"] if closer: # print("Parsing closer") - closerEmail = ( + closer_email = ( closer.get("emailAddress", closer.get("name")) .replace(" dot ", ".", 10) .replace(" at ", "@", 1) ) - if not "@" in closerEmail: - closerEmail = "%s@%s" % (closerEmail, domain) - displayName = closer.get("displayName", "Unkown") - if displayName and len(displayName) > 0: + if "@" not in closer_email: + closer_email = "%s@%s" % (closer_email, domain) + display_name = closer.get("displayName", "Unknown") + if display_name and len(display_name) > 0: # Add to people db pid = hashlib.sha1( - ("%s%s" % (source["organisation"], closerEmail)).encode( + ("%s%s" % (source["organisation"], closer_email)).encode( "ascii", errors="replace" ) ).hexdigest() jsp = { - "name": displayName, - "email": closerEmail, + "name": display_name, + "email": closer_email, "organisation": source["organisation"], "id": pid, "upsert": True, } - KibbleBit.append("person", jsp) + kibble_bit.append("person", jsp) if creator: creator = creator.replace(" dot ", ".", 10).replace(" at ", "@", 1) if not "@" in creator: creator = "%s@%s" % (creator, domain) - displayName = ( + display_name = ( tjson["fields"]["reporter"]["displayName"] if tjson["fields"]["reporter"] else None ) - if displayName and len(displayName) > 0: + if display_name and len(display_name) > 0: # Add to people db pid = hashlib.sha1( ("%s%s" % (source["organisation"], creator)).encode( @@ -262,13 +261,13 @@ def scanTicket(KibbleBit, key, u, source, creds, openTickets): ) ).hexdigest() jsp = { - "name": displayName, + "name": display_name, "email": creator, "organisation": source["organisation"], "id": pid, "upsert": True, } - KibbleBit.append("person", jsp) + kibble_bit.append("person", jsp) if assignee and not "@" in assignee: assignee = "%s@%s" % (assignee, domain) jso = { @@ -276,12 +275,12 @@ def scanTicket(KibbleBit, key, u, source, creds, openTickets): "key": key, "organisation": source["organisation"], 
"sourceID": source["sourceID"], - "url": jiraURL, + "url": jira_url, "status": status, "created": cd, "closed": rd, "issuetype": "issue", - "issueCloser": closerEmail, + "issueCloser": closer_email, "createdDate": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(cd)), "closedDate": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(rd)) if rd @@ -294,7 +293,7 @@ def scanTicket(KibbleBit, key, u, source, creds, openTickets): "comments": comments, "title": title, } - KibbleBit.append("issue", jso) + kibble_bit.append("issue", jso) return True @@ -304,19 +303,19 @@ def scanTicket(KibbleBit, key, u, source, creds, openTickets): # return False -class jiraThread(threading.Thread): - def __init__(self, block, KibbleBit, source, creds, pt, ot): - super(jiraThread, self).__init__() +class JiraThread(threading.Thread): + def __init__(self, block, kibble_bit, source, creds, pt, ot): + super(JiraThread, self).__init__() self.block = block - self.KibbleBit = KibbleBit + self.KibbleBit = kibble_bit self.creds = creds self.source = source self.pendingTickets = pt self.openTickets = ot def run(self): - badOnes = 0 - while len(self.pendingTickets) > 0 and badOnes <= 50: + bad_ones = 0 + while len(self.pendingTickets) > 0 and bad_ones <= 50: # print("%u elements left to count" % len(pendingTickets)) self.block.acquire() try: @@ -329,12 +328,12 @@ def run(self): self.block.release() return self.block.release() - if not scanTicket( + if not scan_ticket( self.KibbleBit, rl[0], rl[1], rl[2], self.creds, self.openTickets ): self.KibbleBit.pprint("[%s] This borked, trying another one" % rl[0]) - badOnes += 1 - if badOnes > 100: + bad_ones += 1 + if bad_ones > 100: self.KibbleBit.pprint("Too many errors, bailing!") self.source["steps"]["issues"] = { "time": time.time(), @@ -343,13 +342,13 @@ def run(self): "running": False, "good": False, } - self.KibbleBit.updateSource(self.source) + self.KibbleBit.update_source(self.source) return else: - badOnes = 0 + bad_ones = 0 -def scan(KibbleBit, source): +def scan(kibble_bit, source): jira = re.match(r"(https?://.+)/browse/([A-Z0-9]+)", source["sourceURL"]) if jira: @@ -363,7 +362,7 @@ def scan(KibbleBit, source): ): creds = "%s:%s" % (source["creds"]["username"], source["creds"]["password"]) if not creds: - KibbleBit.pprint( + kibble_bit.pprint( "JIRA at %s requires authentication, but none was found! Bailing." 
% source["sourceURL"] ) @@ -373,7 +372,7 @@ def scan(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) return source["steps"]["issues"] = { @@ -382,74 +381,75 @@ def scan(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) - pendingTickets = [] - KibbleBit.pprint("Parsing JIRA activity at %s" % source["sourceURL"]) + pending_tickets = [] + kibble_bit.pprint("Parsing JIRA activity at %s" % source["sourceURL"]) source["steps"]["issues"] = { "time": time.time(), "status": "Downloading changeset", "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) # Get base URL, list and domain to parse u = jira.group(1) instance = jira.group(2) - lastTicket = 0 - latestURL = ( + last_ticket = 0 + latest_url = ( "%s/rest/api/2/search?jql=project=%s+order+by+createdDate+DESC&fields=id,key&maxResults=1" % (u, instance) ) js = None - js = jsonapi.get(latestURL, auth=creds) + js = jsonapi.get(latest_url, auth=creds) if "issues" in js and len(js["issues"]) == 1: key = js["issues"][0]["key"] m = re.search(r"-(\d+)$", key) if m: - lastTicket = int(m.group(1)) + last_ticket = int(m.group(1)) - openTickets = [] - startAt = 0 - badTries = 0 - while True and badTries < 10: - openURL = ( + open_tickets = [] + start_at = 0 + bad_tries = 0 + while bad_tries < 10: + open_url = ( "%s/rest/api/2/search?jql=project=%s+and+status=open+order+by+createdDate+ASC&fields=id,key&maxResults=100&startAt=%u" - % (u, instance, startAt) + % (u, instance, start_at) ) # print(openURL) try: - ojs = jsonapi.get(openURL, auth=creds) + ojs = jsonapi.get(open_url, auth=creds) if not "issues" in ojs or len(ojs["issues"]) == 0: break for item in ojs["issues"]: - openTickets.append(item["key"]) - KibbleBit.pprint("Found %u open tickets" % len(openTickets)) - startAt += 100 - except: - KibbleBit.pprint("JIRA borked, retrying") - badTries += 1 - KibbleBit.pprint("Found %u open tickets" % len(openTickets)) - - badOnes = 0 - for i in reversed(range(1, lastTicket + 1)): + open_tickets.append(item["key"]) + kibble_bit.pprint("Found %u open tickets" % len(open_tickets)) + start_at += 100 + except: # pylint: disable=bare-except + kibble_bit.pprint("JIRA borked, retrying") + bad_tries += 1 + kibble_bit.pprint("Found %u open tickets" % len(open_tickets)) + + for i in reversed(range(1, last_ticket + 1)): key = "%s-%u" % (instance, i) - pendingTickets.append([key, u, source]) + pending_tickets.append([key, u, source]) threads = [] block = threading.Lock() - KibbleBit.pprint("Scanning tickets using 4 sub-threads") + kibble_bit.pprint("Scanning tickets using 4 sub-threads") for i in range(0, 4): - t = jiraThread(block, KibbleBit, source, creds, pendingTickets, openTickets) + t = JiraThread( + block, kibble_bit, source, creds, pending_tickets, open_tickets + ) threads.append(t) t.start() for t in threads: t.join() - KibbleBit.pprint("Done scanning %s" % source["sourceURL"]) + kibble_bit.pprint("Done scanning %s" % source["sourceURL"]) source["steps"]["issues"] = { "time": time.time(), @@ -458,4 +458,4 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/pipermail.py b/kibble/scanners/scanners/pipermail.py index 606a97eb..9a0c598c 100644 --- a/kibble/scanners/scanners/pipermail.py +++ b/kibble/scanners/scanners/pipermail.py @@ -43,11 +43,11 @@ def 
accepts(source): return False -def scan(KibbleBit, source): +def scan(kibble_bit, source): url = source["sourceURL"] pipermail = re.match(r"(https?://.+/(archives|pipermail)/.+?)/?$", url) if pipermail: - KibbleBit.pprint("Scanning Pipermail source %s" % url) + kibble_bit.pprint("Scanning Pipermail source %s" % url) skipped = 0 source["steps"]["mail"] = { @@ -56,10 +56,10 @@ def scan(KibbleBit, source): "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) dt = time.gmtime(time.time()) - firstYear = 1970 + first_year = 1970 year = dt[0] month = dt[1] if month <= 0: @@ -70,7 +70,7 @@ def scan(KibbleBit, source): knowns = {} # While we have older archives, continue to parse - monthNames = [ + month_names = [ "December", "January", "February", @@ -85,16 +85,15 @@ def scan(KibbleBit, source): "November", "December", ] - while firstYear <= year: - gzurl = "%s/%04u-%s.txt.gz" % (url, year, monthNames[month]) + while first_year <= year: + gzurl = "%s/%04u-%s.txt.gz" % (url, year, month_names[month]) pd = datetime.date(year, month, 1).timetuple() dhash = hashlib.sha224( ("%s %s" % (source["organisation"], gzurl)).encode( "ascii", errors="replace" ) ).hexdigest() - found = False - found = KibbleBit.exists("mailstats", dhash) + found = kibble_bit.exists("mailstats", dhash) if ( months <= 1 or not found ): # Always parse this month's stats and the previous month :) @@ -153,7 +152,7 @@ def scan(KibbleBit, source): if m: name = m.group(1).replace('"', "").strip() sender = m.group(2) - if not sender in posters: + if sender not in posters: posters[sender] = {"name": name, "email": sender} senders[message.get("message-id", "??")] = sender mdate = email.utils.parsedate_tz(message["date"]) @@ -161,15 +160,15 @@ def scan(KibbleBit, source): "%Y/%m/%d %H:%M:%S", time.gmtime(email.utils.mktime_tz(mdate)), ) - if not sender in knowns: + if sender not in knowns: sid = hashlib.sha1( ("%s%s" % (source["organisation"], sender)).encode( "ascii", errors="replace" ) ).hexdigest() - knowns[sender] = KibbleBit.exists("person", sid) - if not sender in knowns: - KibbleBit.append( + knowns[sender] = kibble_bit.exists("person", sid) + if sender not in knowns: + kibble_bit.append( "person", { "name": name, @@ -196,20 +195,17 @@ def scan(KibbleBit, source): "ts": email.utils.mktime_tz(mdate), "id": message["message-id"], } - KibbleBit.append("email", jse) + kibble_bit.append("email", jse) - for sender in posters: - no_posters += 1 + no_posters = len(posters) + topics = len(rawtopics) i = 0 - topics = 0 - for key in rawtopics: - topics += 1 for key in reversed(sorted(rawtopics, key=lambda x: x)): val = rawtopics[key] i += 1 if i > 10: break - KibbleBit.pprint( + kibble_bit.pprint( "Found top 10: %s (%s emails)" % (key, val) ) shash = hashlib.sha224( @@ -238,7 +234,7 @@ def scan(KibbleBit, source): "ts": time.mktime(pd), "id": mlhash, } - KibbleBit.index("mailtop", mlhash, jst) + kibble_bit.index("mailtop", mlhash, jst) jso = { "organisation": source["organisation"], @@ -249,24 +245,24 @@ def scan(KibbleBit, source): "emails": emails, "topics": topics, } - KibbleBit.index("mailstats", dhash, jso) + kibble_bit.index("mailstats", dhash, jso) os.unlink(mailFile) except Exception as err: - KibbleBit.pprint( + kibble_bit.pprint( "Couldn't parse %s, skipping: %s" % (gzurl, err) ) skipped += 1 if skipped > 12: - KibbleBit.pprint( + kibble_bit.pprint( "12 skips in a row, breaking off (no more data?)" ) break else: - KibbleBit.pprint("Couldn't find %s, skipping." 
% gzurl) + kibble_bit.pprint("Couldn't find %s, skipping." % gzurl) skipped += 1 if skipped > 12: - KibbleBit.pprint( + kibble_bit.pprint( "12 skips in a row, breaking off (no more data?)" ) break @@ -282,13 +278,13 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) else: - KibbleBit.pprint("Invalid Pipermail URL detected: %s" % url, True) + kibble_bit.pprint("Invalid Pipermail URL detected: %s" % url, True) source["steps"]["mail"] = { "time": time.time(), "status": "Invalid or malformed URL detected!", "running": False, "good": False, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/ponymail-kpe.py b/kibble/scanners/scanners/ponymail-kpe.py index 50edac75..8db47c25 100644 --- a/kibble/scanners/scanners/ponymail-kpe.py +++ b/kibble/scanners/scanners/ponymail-kpe.py @@ -47,11 +47,11 @@ def accepts(source): return False -def scan(KibbleBit, source): +def scan(kibble_bit, source): # Validate URL first url = re.match(r"(https?://.+)/list\.html\?(.+)@(.+)", source["sourceURL"]) if not url: - KibbleBit.pprint( + kibble_bit.pprint( "Malformed or invalid Pony Mail URL passed to scanner: %s" % source["sourceURL"] ) @@ -61,11 +61,11 @@ def scan(KibbleBit, source): "running": False, "good": False, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) return - if not "azure" in KibbleBit.config and not "picoapi" in KibbleBit.config: - KibbleBit.pprint( + if not "azure" in kibble_bit.config and not "picoapi" in kibble_bit.config: + kibble_bit.pprint( "No Azure/picoAPI creds configured, skipping key phrase extraction" ) return @@ -74,15 +74,15 @@ def scan(KibbleBit, source): if "creds" in source and source["creds"]: cookie = source["creds"].get("cookie", None) - rootURL = re.sub(r"list.html.+", "", source["sourceURL"]) + root_url = re.sub(r"list.html.+", "", source["sourceURL"]) query = { "query": {"bool": {"must": [{"term": {"sourceID": source["sourceID"]}}]}}, "sort": [{"ts": "desc"}], } # Get an initial count of commits - res = KibbleBit.broker.DB.search( - index=KibbleBit.dbname, doc_type="email", body=query, size=MAX_COUNT * 4 + res = kibble_bit.broker.DB.search( + index=kibble_bit.dbname, doc_type="email", body=query, size=MAX_COUNT * 4 ) ec = 0 hits = [] @@ -93,30 +93,31 @@ def scan(KibbleBit, source): if ec > MAX_COUNT: break if "kpe" not in eml: - emlurl = "%s/api/email.lua?id=%s" % (rootURL, eml["id"]) - KibbleBit.pprint("Fetching %s" % emlurl) + emlurl = "%s/api/email.lua?id=%s" % (root_url, eml["id"]) + kibble_bit.pprint("Fetching %s" % emlurl) rv = None try: rv = jsonapi.get(emlurl, cookie=cookie) if rv and "body" in rv: hits.append([hit["_id"], rv["body"], eml]) except Exception as err: - KibbleBit.pprint(f"Server error: {err}, skipping this email") + kibble_bit.pprint(f"Server error: {err}, skipping this email") bodies = [] for hit in hits: body = hit[1] - bid = hit[0] + # bid = hit[0] bodies.append(body) if bodies: - if "watson" in KibbleBit.config: + KPEs = None + if "watson" in kibble_bit.config: pass # Haven't written this yet - elif "azure" in KibbleBit.config: - KPEs = kpe.azureKPE(KibbleBit, bodies) - elif "picoapi" in KibbleBit.config: - KPEs = kpe.picoKPE(KibbleBit, bodies) + elif "azure" in kibble_bit.config: + KPEs = kpe.azureKPE(kibble_bit, bodies) + elif "picoapi" in kibble_bit.config: + KPEs = kpe.picoKPE(kibble_bit, bodies) if not KPEs: - KibbleBit.pprint("Hit rate limit, not trying further emails for now.") + 
kibble_bit.pprint("Hit rate limit, not trying further emails for now.") a = 0 for hit in hits: @@ -128,7 +129,7 @@ def scan(KibbleBit, source): kpe_ = ["_NULL_"] eml["kpe"] = kpe_ print("Key phrases for %s: %s" % (bid, ", ".join(kpe_))) - KibbleBit.index("email", bid, eml) + kibble_bit.index("email", bid, eml) else: - KibbleBit.pprint("No emails to analyze") - KibbleBit.pprint("Done with key phrase extraction") + kibble_bit.pprint("No emails to analyze") + kibble_bit.pprint("Done with key phrase extraction") diff --git a/kibble/scanners/scanners/ponymail-tone.py b/kibble/scanners/scanners/ponymail-tone.py index e0dea32e..4ae9330c 100644 --- a/kibble/scanners/scanners/ponymail-tone.py +++ b/kibble/scanners/scanners/ponymail-tone.py @@ -44,11 +44,11 @@ def accepts(source): return False -def scan(KibbleBit, source): +def scan(kibble_bit, source): # Validate URL first url = re.match(r"(https?://.+)/list\.html\?(.+)@(.+)", source["sourceURL"]) if not url: - KibbleBit.pprint( + kibble_bit.pprint( "Malformed or invalid Pony Mail URL passed to scanner: %s" % source["sourceURL"] ) @@ -58,15 +58,15 @@ def scan(KibbleBit, source): "running": False, "good": False, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) return if ( - not "watson" in KibbleBit.config - and not "azure" in KibbleBit.config - and not "picoapi" in KibbleBit.config + not "watson" in kibble_bit.config + and not "azure" in kibble_bit.config + and not "picoapi" in kibble_bit.config ): - KibbleBit.pprint( + kibble_bit.pprint( "No Watson/Azure/picoAPI creds configured, skipping tone analyzer" ) return @@ -75,15 +75,15 @@ def scan(KibbleBit, source): if "creds" in source and source["creds"]: cookie = source["creds"].get("cookie", None) - rootURL = re.sub(r"list.html.+", "", source["sourceURL"]) + root_url = re.sub(r"list.html.+", "", source["sourceURL"]) query = { "query": {"bool": {"must": [{"term": {"sourceID": source["sourceID"]}}]}}, "sort": [{"ts": "desc"}], } # Get an initial count of commits - res = KibbleBit.broker.DB.search( - index=KibbleBit.dbname, doc_type="email", body=query, size=MAX_COUNT * 4 + res = kibble_bit.broker.DB.search( + index=kibble_bit.dbname, doc_type="email", body=query, size=MAX_COUNT * 4 ) ec = 0 hits = [] @@ -94,30 +94,30 @@ def scan(KibbleBit, source): if ec > MAX_COUNT: break if "mood" not in eml: - emlurl = "%s/api/email.lua?id=%s" % (rootURL, eml["id"]) - KibbleBit.pprint("Fetching %s" % emlurl) - rv = None + emlurl = "%s/api/email.lua?id=%s" % (root_url, eml["id"]) + kibble_bit.pprint("Fetching %s" % emlurl) try: rv = jsonapi.get(emlurl, cookie=cookie) if rv and "body" in rv: hits.append([hit["_id"], rv["body"], eml]) except Exception as err: - KibbleBit.pprint(f"Server error: {err}, skipping this email") + kibble_bit.pprint(f"Server error: {err}, skipping this email") bodies = [] for hit in hits: body = hit[1] - bid = hit[0] + # bid = hit[0] bodies.append(body) if bodies: - if "watson" in KibbleBit.config: - moods = tone.watsonTone(KibbleBit, bodies) - elif "azure" in KibbleBit.config: - moods = tone.azureTone(KibbleBit, bodies) - elif "picoapi" in KibbleBit.config: - moods = tone.picoTone(KibbleBit, bodies) + moods = None + if "watson" in kibble_bit.config: + moods = tone.watsonTone(kibble_bit, bodies) + elif "azure" in kibble_bit.config: + moods = tone.azureTone(kibble_bit, bodies) + elif "picoapi" in kibble_bit.config: + moods = tone.picoTone(kibble_bit, bodies) if not moods: - KibbleBit.pprint("Hit rate limit, not trying further emails for now.") + kibble_bit.pprint("Hit rate 
limit, not trying further emails for now.") a = 0 for hit in hits: @@ -131,7 +131,7 @@ def scan(KibbleBit, source): if s > hm[0]: hm = [s, m] print("Likeliest overall mood for %s: %s" % (bid, hm[1])) - KibbleBit.index("email", bid, eml) + kibble_bit.index("email", bid, eml) else: - KibbleBit.pprint("No emails to analyze") - KibbleBit.pprint("Done with tone analysis") + kibble_bit.pprint("No emails to analyze") + kibble_bit.pprint("Done with tone analysis") diff --git a/kibble/scanners/scanners/ponymail.py b/kibble/scanners/scanners/ponymail.py index 92a5b0bc..69a49ca8 100644 --- a/kibble/scanners/scanners/ponymail.py +++ b/kibble/scanners/scanners/ponymail.py @@ -45,42 +45,42 @@ def accepts(source): return False -def countSubs(struct, kids=0): +def count_subs(struct, kids=0): """ Counts replies in a thread """ if "children" in struct and len(struct["children"]) > 0: for child in struct["children"]: kids += 1 - kids += countSubs(child) + kids += count_subs(child) return kids -def repliedTo(emails, struct): - myList = {} +def replied_to(emails, struct): + my_list = {} for eml in struct: - myID = eml["tid"] + my_id = eml["tid"] if "children" in eml: for child in eml["children"]: - myList[child["tid"]] = myID + my_list[child["tid"]] = my_id if len(child["children"]) > 0: - cList = repliedTo(emails, child["children"]) - myList.update(cList) - return myList + c_list = replied_to(emails, child["children"]) + my_list.update(c_list) + return my_list -def getSender(email): +def get_sender(email): sender = email["from"] m = re.match(r"(.+)\s*<(.+)>", email["from"], flags=re.UNICODE) if m: - name = m.group(1).replace('"', "").strip() + # name = m.group(1).replace('"', "").strip() sender = m.group(2) return sender -def scan(KibbleBit, source): +def scan(kibble_bit, source): # Validate URL first url = re.match(r"(https?://.+)/list\.html\?(.+)@(.+)", source["sourceURL"]) if not url: - KibbleBit.pprint( + kibble_bit.pprint( "Malformed or invalid Pony Mail URL passed to scanner: %s" % source["sourceURL"] ) @@ -90,7 +90,7 @@ def scan(KibbleBit, source): "running": False, "good": False, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) return # Pony Mail requires a UI cookie in order to work. Maked sure we have one! @@ -98,7 +98,7 @@ def scan(KibbleBit, source): if "creds" in source and source["creds"]: cookie = source["creds"].get("cookie", None) if not cookie: - KibbleBit.pprint( + kibble_bit.pprint( "Pony Mail instance at %s requires an authorized cookie, none found! Bailing." 
% source["sourceURL"] ) @@ -108,18 +108,18 @@ def scan(KibbleBit, source): "running": False, "good": False, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) return # Notify scanner and DB that this is valid and we've begun parsing - KibbleBit.pprint("%s is a valid Pony Mail address, parsing" % source["sourceURL"]) + kibble_bit.pprint("%s is a valid Pony Mail address, parsing" % source["sourceURL"]) source["steps"]["mail"] = { "time": time.time(), "status": "Downloading Pony Mail statistics", "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) # Get base URL, list and domain to parse u = url.group(1) @@ -128,7 +128,7 @@ def scan(KibbleBit, source): # Get this month dt = time.gmtime(time.time()) - firstYear = 1970 + first_year = 1970 year = dt[0] month = dt[1] if month <= 0: @@ -140,7 +140,7 @@ def scan(KibbleBit, source): knowns = {} # While we have older archives, continue to parse - while firstYear <= year: + while first_year <= year: statsurl = "%s/api/stats.lua?list=%s&domain=%s&d=%s" % ( u, l, @@ -153,36 +153,36 @@ def scan(KibbleBit, source): ) ).hexdigest() found = False - if KibbleBit.exists("mailstats", dhash): + if kibble_bit.exists("mailstats", dhash): found = True if months <= 1 or not found: # Always parse this month's stats :) months += 1 - KibbleBit.pprint("Parsing %04u-%02u" % (year, month)) - KibbleBit.pprint(statsurl) + kibble_bit.pprint("Parsing %04u-%02u" % (year, month)) + kibble_bit.pprint(statsurl) pd = datetime.date(year, month, 1).timetuple() try: js = jsonapi.get(statsurl, cookie=cookie) except Exception as err: - KibbleBit.pprint(f"Server error: {err}, skipping this month") + kibble_bit.pprint(f"Server error: {err}, skipping this month") month -= 1 if month <= 0: month += 12 year -= 1 continue if "firstYear" in js: - firstYear = js["firstYear"] + first_year = js["firstYear"] # print("First Year is %u" % firstYear) else: - KibbleBit.pprint("JSON was missing fields, aborting!") + kibble_bit.pprint("JSON was missing fields, aborting!") break - replyList = repliedTo(js["emails"], js["thread_struct"]) + reply_list = replied_to(js["emails"], js["thread_struct"]) topics = js["no_threads"] posters = {} no_posters = 0 emails = len(js["emails"]) top10 = [] for eml in js["thread_struct"]: - count = countSubs(eml, 0) + count = count_subs(eml, 0) subject = "" for reml in js["emails"]: if reml["id"] == eml["tid"]: @@ -202,7 +202,7 @@ def scan(KibbleBit, source): i += 1 if i > 10: break - KibbleBit.pprint("Found top 10: %s (%s emails)" % (top[1], top[2])) + kibble_bit.pprint("Found top 10: %s (%s emails)" % (top[1], top[2])) md = time.strftime("%Y/%m/%d %H:%M:%S", pd) mlhash = hashlib.sha224( ( @@ -221,7 +221,7 @@ def scan(KibbleBit, source): "ts": time.mktime(pd), "id": mlhash, } - KibbleBit.index("mailtop", mlhash, jst) + kibble_bit.index("mailtop", mlhash, jst) for email in js["emails"]: sender = email["from"] @@ -238,10 +238,10 @@ def scan(KibbleBit, source): "ascii", errors="replace" ) ).hexdigest() - if KibbleBit.exists("person", sid): + if kibble_bit.exists("person", sid): knowns[sender] = True if not sender in knowns or name != sender: - KibbleBit.append( + kibble_bit.append( "person", { "upsert": True, @@ -256,12 +256,12 @@ def scan(KibbleBit, source): }, ) knowns[sender] = True - replyTo = None - if email["id"] in replyList: - rt = replyList[email["id"]] + reply_to = None + if email["id"] in reply_list: + rt = reply_list[email["id"]] for eml in js["emails"]: if eml["id"] == rt: - replyTo = 
getSender(eml) + reply_to = get_sender(eml) print("Email was reply to %s" % sender) jse = { "organisation": source["organisation"], @@ -273,14 +273,13 @@ def scan(KibbleBit, source): "sender": sender, "address": sender, "subject": email["subject"], - "replyto": replyTo, + "replyto": reply_to, "ts": email["epoch"], "id": email["id"], "upsert": True, } - KibbleBit.append("email", jse) - for sender in posters: - no_posters += 1 + kibble_bit.append("email", jse) + no_posters = len(posters) jso = { "organisation": source["organisation"], @@ -292,7 +291,7 @@ def scan(KibbleBit, source): "topics": topics, } # print("Indexing as %s" % dhash) - KibbleBit.index("mailstats", dhash, jso) + kibble_bit.index("mailstats", dhash, jso) month -= 1 if month <= 0: month += 12 @@ -305,4 +304,4 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/travis.py b/kibble/scanners/scanners/travis.py index 22b4975a..2c7852f6 100644 --- a/kibble/scanners/scanners/travis.py +++ b/kibble/scanners/scanners/travis.py @@ -39,44 +39,37 @@ def accepts(source): return False -def scanJob(KibbleBit, source, bid, token, TLD): +def scan_job(kibble_bit, source, bid, token, TLD): """ Scans a single job for activity """ - # NOW = int(datetime.datetime.utcnow().timestamp()) - dhash = hashlib.sha224( - ("%s-%s-%s" % (source["organisation"], source["sourceURL"], bid)).encode( - "ascii", errors="replace" - ) - ).hexdigest() - # Get the job data pages = 0 offset = 0 last_page = False - oURL = "https://api.travis-ci.%s/repo/%s/builds" % (TLD, bid) + o_url = "https://api.travis-ci.%s/repo/%s/builds" % (TLD, bid) # For as long as pagination makes sense... while not last_page: - bURL = "https://api.travis-ci.%s/repo/%s/builds?limit=100&offset=%u" % ( + b_url = "https://api.travis-ci.%s/repo/%s/builds?limit=100&offset=%u" % ( TLD, bid, offset, ) - KibbleBit.pprint("Scanning %s" % bURL) + kibble_bit.pprint("Scanning %s" % b_url) rv = requests.get( - bURL, + b_url, headers={"Travis-API-Version": "3", "Authorization": "token %s" % token}, ) if rv.status_code == 200: repojs = rv.json() # If travis tells us it's the last page, trust it. 
if repojs["@pagination"]["is_last"]: - KibbleBit.pprint( + kibble_bit.pprint( "Assuming this is the last page we need (travis says so)" ) last_page = True - KibbleBit.pprint( - "%s has %u builds done" % (bURL, repojs["@pagination"]["count"]) + kibble_bit.pprint( + "%s has %u builds done" % (b_url, repojs["@pagination"]["count"]) ) # BREAKER: If we go past count somehow, and travis doesn't say so, bork anyway @@ -85,10 +78,10 @@ def scanJob(KibbleBit, source, bid, token, TLD): offset += 100 for build in repojs.get("builds", []): - buildID = build["id"] - buildProject = build["repository"]["slug"] - startedAt = build["started_at"] - finishedAt = build["finished_at"] + build_id = build["id"] + build_project = build["repository"]["slug"] + started_at = build["started_at"] + finished_at = build["finished_at"] duration = build["duration"] completed = True if duration else False duration = duration or 0 @@ -96,13 +89,13 @@ def scanJob(KibbleBit, source, bid, token, TLD): buildhash = hashlib.sha224( ( "%s-%s-%s-%s" - % (source["organisation"], source["sourceURL"], bid, buildID) + % (source["organisation"], source["sourceURL"], bid, build_id) ).encode("ascii", errors="replace") ).hexdigest() builddoc = None try: - builddoc = KibbleBit.get("ci_build", buildhash) - except: + builddoc = kibble_bit.get("ci_build", buildhash) + except: # pylint: disable=bare-except pass # If this build already completed, no need to parse it again @@ -110,7 +103,7 @@ def scanJob(KibbleBit, source, bid, token, TLD): # If we're on page > 1 and we've seen a completed build, assume # that we don't need the older ones if pages > 1: - KibbleBit.pprint( + kibble_bit.pprint( "Assuming this is the last page we need (found completed build on page > 1)" ) last_page = True @@ -126,16 +119,16 @@ def scanJob(KibbleBit, source, bid, token, TLD): if build["state"] in ["aborted", "canceled"]: status = "aborted" - FIN = 0 - STA = 0 - if finishedAt: - FIN = datetime.datetime.strptime( - finishedAt, "%Y-%m-%dT%H:%M:%SZ" + fin = 0 + sta = 0 + if finished_at: + fin = datetime.datetime.strptime( + finished_at, "%Y-%m-%dT%H:%M:%SZ" ).timestamp() - if startedAt: - STA = int( + if started_at: + sta = int( datetime.datetime.strptime( - startedAt, "%Y-%m-%dT%H:%M:%SZ" + started_at, "%Y-%m-%dT%H:%M:%SZ" ).timestamp() ) @@ -145,14 +138,14 @@ def scanJob(KibbleBit, source, bid, token, TLD): doc = { # Build specific data "id": buildhash, - "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(FIN)), - "buildID": buildID, + "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(fin)), + "buildID": build_id, "completed": completed, "duration": duration * 1000, - "job": buildProject, - "jobURL": oURL, + "job": build_project, + "jobURL": o_url, "status": status, - "started": STA, + "started": sta, "ci": "travis", "queuetime": queuetime, # Standard docs values @@ -160,31 +153,31 @@ def scanJob(KibbleBit, source, bid, token, TLD): "organisation": source["organisation"], "upsert": True, } - KibbleBit.append("ci_build", doc) + kibble_bit.append("ci_build", doc) pages += 1 else: # We hit a snag, abort! 
- KibbleBit.pprint("Travis returned a non-200 response, aborting.") + kibble_bit.pprint("Travis returned a non-200 response, aborting.") return False return True -class travisThread(threading.Thread): +class TravisThread(threading.Thread): """ Generic thread class for scheduling multiple scans at once """ - def __init__(self, block, KibbleBit, source, token, jobs, TLD): - super(travisThread, self).__init__() + def __init__(self, block, kibble_bit, source, token, jobs, TLD): + super(TravisThread, self).__init__() self.block = block - self.KibbleBit = KibbleBit + self.kibble_bit = kibble_bit self.token = token self.source = source self.jobs = jobs self.tld = TLD def run(self): - badOnes = 0 - while len(self.jobs) > 0 and badOnes <= 50: + bad_ones = 0 + while len(self.jobs) > 0 and bad_ones <= 50: self.block.acquire() try: job = self.jobs.pop(0) @@ -195,11 +188,11 @@ def run(self): self.block.release() return self.block.release() - if not scanJob(self.KibbleBit, self.source, job, self.token, self.tld): - self.KibbleBit.pprint("[%s] This borked, trying another one" % job) - badOnes += 1 - if badOnes > 100: - self.KibbleBit.pprint("Too many errors, bailing!") + if not scan_job(self.kibble_bit, self.source, job, self.token, self.tld): + self.kibble_bit.pprint("[%s] This borked, trying another one" % job) + bad_ones += 1 + if bad_ones > 100: + self.kibble_bit.pprint("Too many errors, bailing!") self.source["steps"]["travis"] = { "time": time.time(), "status": "Too many errors while parsing at " @@ -207,38 +200,37 @@ def run(self): "running": False, "good": False, } - self.KibbleBit.updateSource(self.source) + self.kibble_bit.update_source(self.source) return else: - badOnes = 0 + bad_ones = 0 -def sclan(KibbleBit, source): +def scan(kibble_bit, source): # Simple URL check travis = re.match(r"https?://travis-ci\.(org|com)", source["sourceURL"]) if travis: # Is this travs-ci.org or travis-ci.com - we need to know! 
- TLD = travis.group(1) + tld = travis.group(1) source["steps"]["travis"] = { "time": time.time(), "status": "Parsing Travis job changes...", "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) - pendingJobs = [] - KibbleBit.pprint("Parsing Travis activity at %s" % source["sourceURL"]) + pending_jobs = [] + kibble_bit.pprint("Parsing Travis activity at %s" % source["sourceURL"]) source["steps"]["travis"] = { "time": time.time(), "status": "Downloading changeset", "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) # Travis needs a token - token = None if ( source["creds"] and "token" in source["creds"] @@ -247,7 +239,7 @@ def sclan(KibbleBit, source): ): token = source["creds"]["token"] else: - KibbleBit.pprint("Travis CI requires a token to work!") + kibble_bit.pprint("Travis CI requires a token to work!") return False # Used for pagination @@ -262,15 +254,15 @@ def sclan(KibbleBit, source): stuck = 0 # Ditto avgqueuetime = 0 # Ditto, fake it - maybeQueued = [] + maybe_queued = [] while jobs == 100: - URL = ( + url = ( "https://api.travis-ci.%s/repos?repository.active=true&sort_by=current_build:desc&offset=%u&limit=100&include=repository.last_started_build" - % (TLD, offset) + % (tld, offset) ) offset += 100 r = requests.get( - URL, + url, headers={ "Travis-API-Version": "3", "Authorization": "token %s" % token, @@ -278,7 +270,7 @@ def sclan(KibbleBit, source): ) if r.status_code != 200: - KibbleBit.pprint("Travis did not return a 200 Okay, bad token?!") + kibble_bit.pprint("Travis did not return a 200 Okay, bad token?!") source["steps"]["travis"] = { "time": time.time(), @@ -289,7 +281,7 @@ def sclan(KibbleBit, source): "running": False, "good": False, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) return # For each build job @@ -301,20 +293,20 @@ def sclan(KibbleBit, source): # Is the build currently running? if cb["state"] in ["started", "created", "queued", "pending"]: for job in cb.get("jobs", []): - maybeQueued.append(job["id"]) + maybe_queued.append(job["id"]) # Queue up build jobs for the threaded scanner bid = repo["id"] - pendingJobs.append(bid) + pending_jobs.append(bid) jobs = len(js["repositories"]) - KibbleBit.pprint("Scanned %u jobs..." % total) + kibble_bit.pprint("Scanned %u jobs..." % total) # Find out how many building and pending jobs - for jobID in maybeQueued: - URL = "https://api.travis-ci.%s/job/%u" % (TLD, jobID) + for job_id in maybe_queued: + url = "https://api.travis-ci.%s/job/%u" % (tld, job_id) r = requests.get( - URL, + url, headers={ "Travis-API-Version": "3", "Authorization": "token %s" % token, @@ -324,15 +316,15 @@ def sclan(KibbleBit, source): jobjs = r.json() if jobjs["state"] == "started": building += 1 - KibbleBit.pprint("Job %u is building" % jobID) + kibble_bit.pprint("Job %u is building" % job_id) elif jobjs["state"] in ["created", "queued", "pending"]: queued += 1 blocked += 1 # Queued in Travis generally means a job can't find an executor, and thus is blocked. - KibbleBit.pprint("Job %u is pending" % jobID) - KibbleBit.pprint("%u building, %u queued..." % (building, queued)) + kibble_bit.pprint("Job %u is pending" % job_id) + kibble_bit.pprint("%u building, %u queued..." 
% (building, queued)) # Save queue snapshot - NOW = int(datetime.datetime.utcnow().timestamp()) + now = int(datetime.datetime.utcnow().timestamp()) queuehash = hashlib.sha224( ( "%s-%s-queue-%s" @@ -343,8 +335,8 @@ def sclan(KibbleBit, source): # Write up a queue doc queuedoc = { "id": queuehash, - "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(NOW)), - "time": NOW, + "date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(now)), + "time": now, "building": building, "size": queued, "blocked": blocked, @@ -356,15 +348,15 @@ def sclan(KibbleBit, source): "organisation": source["organisation"], "upsert": True, } - KibbleBit.append("ci_queue", queuedoc) + kibble_bit.append("ci_queue", queuedoc) - KibbleBit.pprint("Found %u jobs in Travis" % len(pendingJobs)) + kibble_bit.pprint("Found %u jobs in Travis" % len(pending_jobs)) threads = [] block = threading.Lock() - KibbleBit.pprint("Scanning jobs using 4 sub-threads") + kibble_bit.pprint("Scanning jobs using 4 sub-threads") for i in range(0, 4): - t = travisThread(block, KibbleBit, source, token, pendingJobs, TLD) + t = TravisThread(block, kibble_bit, source, token, pending_jobs, tld) threads.append(t) t.start() @@ -372,7 +364,7 @@ def sclan(KibbleBit, source): t.join() # We're all done, yaay - KibbleBit.pprint("Done scanning %s" % source["sourceURL"]) + kibble_bit.pprint("Done scanning %s" % source["sourceURL"]) source["steps"]["travis"] = { "time": time.time(), @@ -381,4 +373,4 @@ def sclan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) diff --git a/kibble/scanners/scanners/twitter.py b/kibble/scanners/scanners/twitter.py index fd6af5b2..dc87ed2a 100644 --- a/kibble/scanners/scanners/twitter.py +++ b/kibble/scanners/scanners/twitter.py @@ -37,7 +37,7 @@ def accepts(source): return False -def getFollowers(KibbleBit, source, t): +def get_followers(kibble_bit, source, t): """ Get followers of a handle, store them for mapping and trend purposes""" # Get our twitter handle handle = source["sourceURL"] @@ -59,18 +59,18 @@ def getFollowers(KibbleBit, source, t): "followers": no_followers, "date": d, } - KibbleBit.pprint("%s has %u followers currently." % (handle, no_followers)) - KibbleBit.index("twitter_followers", dhash, jst) + kibble_bit.pprint("%s has %u followers currently." % (handle, no_followers)) + kibble_bit.index("twitter_followers", dhash, jst) # Collect list of current followers followers = t.GetFollowers(screen_name=handle) # For each follower, if they're not mapped yet, add them # This has a limitation of 100 new added per run, but meh... - KibbleBit.pprint("Looking up followers of %s" % handle) + kibble_bit.pprint("Looking up followers of %s" % handle) for follower in followers: # id, name, screen_name are useful here - KibbleBit.pprint("Found %s as follower" % follower.screen_name) + kibble_bit.pprint("Found %s as follower" % follower.screen_name) # Store twitter follower profile if not already logged dhash = hashlib.sha224( @@ -78,7 +78,7 @@ def getFollowers(KibbleBit, source, t): "ascii", errors="replace" ) ).hexdigest() - if not KibbleBit.exists("twitter_follow", dhash): + if not kibble_bit.exists("twitter_follow", dhash): jst = { "organisation": source["organisation"], "sourceURL": source["sourceURL"], @@ -91,20 +91,20 @@ def getFollowers(KibbleBit, source, t): "%Y/%m/%d %H:%M:%S", time.gmtime() ), # First time we spotted them following. } - KibbleBit.pprint( + kibble_bit.pprint( "%s is new, recording date and details." 
% follower.screen_name ) - KibbleBit.index("twitter_follow", dhash, jst) + kibble_bit.index("twitter_follow", dhash, jst) -def scan(KibbleBit, source): +def scan(kibble_bit, source): source["steps"]["twitter"] = { "time": time.time(), "status": "Scanning Twitter activity and status", "running": True, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) t = None if "creds" in source and source["creds"]: t = twitter.Api( @@ -113,22 +113,22 @@ def scan(KibbleBit, source): consumer_key=source["creds"].get("consumer_key", None), consumer_secret=source["creds"].get("consumer_secret", None), ) - KibbleBit.pprint("Verifying twitter credentials...") + kibble_bit.pprint("Verifying twitter credentials...") try: t.VerifyCredentials() - except: + except: # pylint: disable=bare-except source["steps"]["twitter"] = { "time": time.time(), "status": "Could not verify twitter credentials", "running": False, "good": False, } - KibbleBit.updateSource(source) - KibbleBit.pprint("Could not verify twitter creds, aborting!") + kibble_bit.update_source(source) + kibble_bit.pprint("Could not verify twitter creds, aborting!") return # Start by getting and saving followers try: - getFollowers(KibbleBit, source, t) + get_followers(kibble_bit, source, t) except Exception as err: source["steps"]["twitter"] = { "time": time.time(), @@ -136,8 +136,8 @@ def scan(KibbleBit, source): "running": False, "good": False, } - KibbleBit.updateSource(source) - KibbleBit.pprint("Twitter scan failed: %s" % err) + kibble_bit.update_source(source) + kibble_bit.pprint("Twitter scan failed: %s" % err) # All done, report that! source["steps"]["twitter"] = { @@ -147,4 +147,4 @@ def scan(KibbleBit, source): "running": False, "good": True, } - KibbleBit.updateSource(source) + kibble_bit.update_source(source) From e19cf6cc4de4c20e70da5de5ef37cb3c1668dbe3 Mon Sep 17 00:00:00 2001 From: Tomek Urbaszek Date: Sun, 13 Dec 2020 15:56:12 +0000 Subject: [PATCH 45/48] Make scanners use kibble.ini instead of config.yaml (#122) This PR moves the watson, azure, picoapi and git configuration to the kibble.ini config file. It also fixes reading the ES configuration. This way, all Kibble configuration lives in a single place. --- kibble.ini | 20 ++ kibble/scanners/README.md | 2 +- kibble/scanners/brokers/kibbleES.py | 18 +- kibble/scanners/config.yaml | 39 --- kibble/scanners/scanners/git-census.py | 4 +- kibble/scanners/scanners/git-evolution.py | 5 +- kibble/scanners/scanners/git-sloc.py | 5 +- kibble/scanners/scanners/git-sync.py | 5 +- kibble/scanners/scanners/ponymail-kpe.py | 13 +- kibble/scanners/scanners/ponymail-tone.py | 20 +- kibble/scanners/utils/git.py | 17 +- kibble/scanners/utils/kpe.py | 218 ++++++++-------- kibble/scanners/utils/tone.py | 289 +++++++++++----------- kibble/settings.py | 6 + 14 files changed, 324 insertions(+), 337 deletions(-) delete mode 100644 kibble/scanners/config.yaml diff --git a/kibble.ini b/kibble.ini index 72f0e0cb..8e4f042a 100644 --- a/kibble.ini +++ b/kibble.ini @@ -25,6 +25,26 @@ scratchdir = /tmp # each node will gat 1/4th of all jobs to work on. 
balance = +[git] +# Comma-separated branch names +wanted_branches = + +# Watson/BlueMix configuration for sentiment analysis, if applicable +[watson] +username = +password = +api = https://gateway-location.watsonplatform.net/tone-analyzer/api + +# Azure Text Analysis API configuration, if applicable +[azure] +apikey = +location = west-us + +# picoAPI Text Analysis configuration +[picoapi] +key = + + [elasticsearch] # Elasticsearch database name dbname = kibble diff --git a/kibble/scanners/README.md b/kibble/scanners/README.md index 87da385d..5746a118 100644 --- a/kibble/scanners/README.md +++ b/kibble/scanners/README.md @@ -3,7 +3,7 @@ The Kibble Scanners collect information for the Kibble Suite. ## Setup instructions: - - Edit conf/config.yaml to match your Kibble service + - Edit kibble.ini to match your Kibble service ## How to run: diff --git a/kibble/scanners/brokers/kibbleES.py b/kibble/scanners/brokers/kibbleES.py index 5e6eb0db..5ee92d29 100644 --- a/kibble/scanners/brokers/kibbleES.py +++ b/kibble/scanners/brokers/kibbleES.py @@ -119,14 +119,13 @@ class KibbleBit: """ KibbleBit class with direct ElasticSearch access """ def __init__(self, broker, organisation, tid): - self.config = broker.config self.organisation = organisation self.broker = broker self.json_queue = [] self.queueMax = 1000 # Entries to keep before bulk pushing self.pluginname = "" self.tid = tid - self.dbname = self.broker.config["elasticsearch"]["database"] + self.dbname = conf.get("elasticsearch", "dbname") def __del__(self): """ On unload/delete, push the last chunks of data to ES """ @@ -144,7 +143,7 @@ def pprint(self, string, err=False): def update_source(self, source): """ Updates a source document, usually with a status update """ self.broker.DB.index( - index=self.broker.config["elasticsearch"]["database"], + index=self.dbname, doc_type="source", id=source["sourceID"], body=source, @@ -153,7 +152,7 @@ def update_source(self, source): def get(self, doctype, docid): """ Fetches a document from the DB """ doc = self.broker.DB.get( - index=self.broker.config["elasticsearch"]["database"], + index=self.dbname, doc_type=doctype, id=docid, ) @@ -164,14 +163,14 @@ def get(self, doctype, docid): def exists(self, doctype, docid): """ Checks whether a document already exists or not """ return self.broker.DB.exists( - index=self.broker.config["elasticsearch"]["database"], + index=self.dbname, doc_type=doctype, id=docid, ) def index(self, doctype, docid, document): """ Adds a new document to the index """ - dbname = self.broker.config["elasticsearch"]["database"] + dbname = self.dbname self.broker.DB.index(index=dbname, doc_type=doctype, id=docid, body=document) def append(self, t, doc): @@ -195,7 +194,7 @@ def bulk(self): js = entry doc = js js["@version"] = 1 - dbname = self.broker.config["elasticsearch"]["database"] + dbname = self.dbname if self.broker.noTypes: dbname += "_%s" % js["doctype"] js_arr.append( @@ -233,6 +232,7 @@ def __init__(self, broker, org): self.broker = broker self.id = org + self.dbname = conf.get("elasticsearch", "dbname") def sources(self, sourceType=None, view=None): """ Get all sources or sources of a specific type for an org """ mustArray = [{"term": {"organisation": self.id}}] if view: res = self.broker.DB.get( - index=self.broker.config["elasticsearch"]["database"], + index=self.dbname, doc_type="view", id=view, ) mustArray.append({"term": {"type": 
sourceType}}) # Run the search, fetch all results, 9999 max. TODO: Scroll??? res = self.broker.DB.search( - index=self.broker.config["elasticsearch"]["database"], + index=self.dbname, doc_type="source", size=9999, body={"query": {"bool": {"must": mustArray}}, "sort": {"sourceURL": "asc"}}, diff --git a/kibble/scanners/config.yaml b/kibble/scanners/config.yaml deleted file mode 100644 index d835539a..00000000 --- a/kibble/scanners/config.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# If enabled, kibble scanners will use direct ES connection. -elasticsearch: - enabled: true - hostname: localhost - port: 9200 - ssl: false - uri: "" - database: kibble - -# If enabled, kibble scanners will use the HTTP JSON API -broker: - enabled: false - url: https://localhost/api/ - auth: - username: kibble - password: kibble4life - -# Scanner client options -scanner: - # scratchdir: Location for storing file objects like git repos etc - # This should be permanent to speed up scans of large repositories - # on consecutive scans, but may be ephemeral like /tmp - scratchdir: /tmp - # If you are load balancing the scans, you should specify - # how many nodes are working, and which one you are, - # using the format: $nodeNo/$totalNodes. If there are 4 nodes, - # each node will gat 1/4th of all jobs to work on. - #balance: 1/4 - -# Watson/BlueMix configuration for sentiment analysis, if applicable -#watson: -# username: uuid-here -# password: pass-here -# api: https://gateway-location.watsonplatform.net/tone-analyzer/api - -# Azure Text Analysis API configuration, if applicable -#azure: -# apikey: key-here -# location: west-us diff --git a/kibble/scanners/scanners/git-census.py b/kibble/scanners/scanners/git-census.py index 790ae492..b92f8690 100644 --- a/kibble/scanners/scanners/git-census.py +++ b/kibble/scanners/scanners/git-census.py @@ -24,6 +24,8 @@ import tempfile import time +from kibble.configuration import conf + title = "Census Scanner for Git" version = "0.1.0" @@ -50,7 +52,7 @@ def scan(kibble_bit, source): rid = source["sourceID"] url = source["sourceURL"] rootpath = "%s/%s/git" % ( - kibble_bit.config["scanner"]["scratchdir"], + conf.get("scanner", "scratchdir"), source["organisation"], ) gpath = os.path.join(rootpath, rid) diff --git a/kibble/scanners/scanners/git-evolution.py b/kibble/scanners/scanners/git-evolution.py index b533b4bb..8f4a8369 100644 --- a/kibble/scanners/scanners/git-evolution.py +++ b/kibble/scanners/scanners/git-evolution.py @@ -23,6 +23,7 @@ import subprocess import time +from kibble.configuration import conf from kibble.scanners.utils import sloc title = "Git Evolution Scanner" @@ -138,7 +139,7 @@ def scan(kibble_bit, source): rid = source["sourceID"] rootpath = "%s/%s/git" % ( - kibble_bit.config["scanner"]["scratchdir"], + conf.get("scanner", "scratchdir"), source["organisation"], ) gpath = os.path.join(rootpath, rid) @@ -158,7 +159,7 @@ def scan(kibble_bit, source): rid = source["sourceID"] url = source["sourceURL"] rootpath = "%s/%s/git" % ( - kibble_bit.config["scanner"]["scratchdir"], + conf.get("scanner", "scratchdir"), source["organisation"], ) gpath = os.path.join(rootpath, rid) diff --git a/kibble/scanners/scanners/git-sloc.py b/kibble/scanners/scanners/git-sloc.py index 8b44c543..4ae13ac4 100644 --- a/kibble/scanners/scanners/git-sloc.py +++ b/kibble/scanners/scanners/git-sloc.py @@ -19,6 +19,7 @@ import subprocess import time +from kibble.configuration import conf from kibble.scanners.utils import git, sloc """ Source Lines of Code counter for Git """ @@ -42,7 +43,7 @@ 
def scan(kibble_bit, source): rid = source["sourceID"] url = source["sourceURL"] rootpath = "%s/%s/git" % ( - kibble_bit.config["scanner"]["scratchdir"], + conf.get("scanner", "scratchdir"), source["organisation"], ) gpath = os.path.join(rootpath, rid) @@ -58,7 +59,7 @@ def scan(kibble_bit, source): kibble_bit.update_source(source) try: - branch = git.defaultBranch(source, gpath) + branch = git.default_branch(source, gpath) subprocess.call("cd %s && git checkout %s" % (gpath, branch), shell=True) except: # pylint: disable=bare-except kibble_bit.pprint("SLoC counter failed to find main branch for %s!!" % url) diff --git a/kibble/scanners/scanners/git-sync.py b/kibble/scanners/scanners/git-sync.py index c1c40f6d..aee39afd 100644 --- a/kibble/scanners/scanners/git-sync.py +++ b/kibble/scanners/scanners/git-sync.py @@ -19,6 +19,7 @@ import subprocess import time +from kibble.configuration import conf from kibble.scanners.utils import git title = "Sync plugin for Git repositories" @@ -41,7 +42,7 @@ def scan(kibble_bit, source): path = source["sourceID"] url = source["sourceURL"] rootpath = "%s/%s/git" % ( - kibble_bit.config["scanner"]["scratchdir"], + conf.get("scanner", "scratchdir"), source["organisation"], ) @@ -79,7 +80,7 @@ def scan(kibble_bit, source): kibble_bit.pprint("Repo %s exists, fetching changes..." % datapath) # Do we have a default branch here? - branch = git.defaultBranch(source, datapath, kibble_bit) + branch = git.default_branch(source, datapath) if len(branch) == 0: source["default_branch"] = branch source["steps"]["sync"] = { diff --git a/kibble/scanners/scanners/ponymail-kpe.py b/kibble/scanners/scanners/ponymail-kpe.py index 8db47c25..f7988acf 100644 --- a/kibble/scanners/scanners/ponymail-kpe.py +++ b/kibble/scanners/scanners/ponymail-kpe.py @@ -19,6 +19,7 @@ import time from kibble.scanners.utils import jsonapi, kpe +from kibble.settings import AZURE_ENABLED, PICOAPI_ENABLED, WATSON_ENABLED """ This is a Kibble scanner plugin for Apache Pony Mail sources. 
@@ -64,7 +65,7 @@ def scan(kibble_bit, source): kibble_bit.update_source(source) return - if not "azure" in kibble_bit.config and not "picoapi" in kibble_bit.config: + if not AZURE_ENABLED and not PICOAPI_ENABLED: kibble_bit.pprint( "No Azure/picoAPI creds configured, skipping key phrase extraction" ) @@ -110,12 +111,12 @@ def scan(kibble_bit, source): bodies.append(body) if bodies: KPEs = None - if "watson" in kibble_bit.config: + if WATSON_ENABLED: pass # Haven't written this yet - elif "azure" in kibble_bit.config: - KPEs = kpe.azureKPE(kibble_bit, bodies) - elif "picoapi" in kibble_bit.config: - KPEs = kpe.picoKPE(kibble_bit, bodies) + elif AZURE_ENABLED: + KPEs = kpe.azure_kpe(kibble_bit, bodies) + elif PICOAPI_ENABLED: + KPEs = kpe.pico_kpe(kibble_bit, bodies) if not KPEs: kibble_bit.pprint("Hit rate limit, not trying further emails for now.") diff --git a/kibble/scanners/scanners/ponymail-tone.py b/kibble/scanners/scanners/ponymail-tone.py index 4ae9330c..fe31217c 100644 --- a/kibble/scanners/scanners/ponymail-tone.py +++ b/kibble/scanners/scanners/ponymail-tone.py @@ -21,7 +21,9 @@ import re import time +from kibble.configuration import conf from kibble.scanners.utils import jsonapi, tone +from kibble.settings import AZURE_ENABLED, PICOAPI_ENABLED, WATSON_ENABLED title = "Tone/Mood Scanner plugin for Apache Pony Mail" version = "0.1.0" @@ -61,11 +63,7 @@ def scan(kibble_bit, source): kibble_bit.update_source(source) return - if ( - not "watson" in kibble_bit.config - and not "azure" in kibble_bit.config - and not "picoapi" in kibble_bit.config - ): + if not WATSON_ENABLED and not AZURE_ENABLED and not PICOAPI_ENABLED: kibble_bit.pprint( "No Watson/Azure/picoAPI creds configured, skipping tone analyzer" ) @@ -110,12 +108,12 @@ def scan(kibble_bit, source): bodies.append(body) if bodies: moods = None - if "watson" in kibble_bit.config: - moods = tone.watsonTone(kibble_bit, bodies) - elif "azure" in kibble_bit.config: - moods = tone.azureTone(kibble_bit, bodies) - elif "picoapi" in kibble_bit.config: - moods = tone.picoTone(kibble_bit, bodies) + if WATSON_ENABLED: + moods = tone.watson_tone(kibble_bit, bodies) + elif AZURE_ENABLED: + moods = tone.azure_tone(kibble_bit, bodies) + elif PICOAPI_ENABLED: + moods = tone.pico_tone(kibble_bit, bodies) if not moods: kibble_bit.pprint("Hit rate limit, not trying further emails for now.") diff --git a/kibble/scanners/utils/git.py b/kibble/scanners/utils/git.py index c1f3cd7f..2dd90997 100644 --- a/kibble/scanners/utils/git.py +++ b/kibble/scanners/utils/git.py @@ -17,21 +17,20 @@ """ This is the Kibble git utility plugin """ -import os import re import subprocess -import sys +from kibble.configuration import conf -def defaultBranch(source, datapath, KibbleBit=None): + +def default_branch(source, datapath): """ Tries to figure out what the main branch of a repo is """ - wanted_branches = ["master", "main", "trunk"] - branch = "" # If we have an override of branches we like, use 'em - if KibbleBit and KibbleBit.config.get("git"): - wanted_branches = KibbleBit.config["git"].get( - "wanted_branches", wanted_branches - ) + wanted_branches = conf.get("git", "wanted_branches", fallback=None) + if wanted_branches: + wanted_branches = wanted_branches.split(",") + else: + wanted_branches = ["master", "main", "trunk"] # For each wanted branch, in order, look for it in our clone, # and return the name if found. 
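As of this change, `default_branch` resolves its candidate branches from the `[git]` section of `kibble.ini` rather than from the per-run scanner config. A minimal sketch of that lookup, assuming the comma-separated `wanted_branches` option shown above; the inline config payload is an invented example, a real deployment reads `kibble.ini` instead:

    from configparser import ConfigParser

    conf = ConfigParser()
    # Hypothetical override; omit the option entirely to use the stock candidates
    conf.read_string("[git]\nwanted_branches = develop,main\n")

    def wanted_branches() -> list:
        # Comma-separated override from the config, else master/main/trunk
        raw = conf.get("git", "wanted_branches", fallback=None)
        return raw.split(",") if raw else ["master", "main", "trunk"]

    print(wanted_branches())  # -> ['develop', 'main']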
diff --git a/kibble/scanners/utils/kpe.py b/kibble/scanners/utils/kpe.py index 7eae14b1..799bbb88 100644 --- a/kibble/scanners/utils/kpe.py +++ b/kibble/scanners/utils/kpe.py @@ -19,16 +19,16 @@ This is an experimental key phrase extraction plugin for using Azure/picoAPI for analyzing the key elements of an email on a list. This requires an account with a text analysis service provider, and a -corresponding API section in config.yaml, as such: +corresponding API section in kibble.ini, as such: # picoAPI example: -picoapi: - key: abcdef1234567890 +[picoapi] +key = abcdef1234567890 # Azure example: -azure: - apikey: abcdef1234567890 - location: westeurope +[azure] +apikey = abcdef1234567890 +location = westeurope Currently only pony mail is supported. more to come. """ @@ -38,8 +38,10 @@ import requests +from kibble.configuration import conf -def trimBody(body): + +def trim_body(body): """ Quick function for trimming away the fat from emails """ # Cut away "On $date, jane doe wrote: " kind of texts body = re.sub( @@ -62,108 +64,104 @@ def trimBody(body): return body -def azureKPE(KibbleBit, bodies): +def azure_kpe(kibble_bit, bodies): """ KPE using Azure Text Analysis API """ - if "azure" in KibbleBit.config: - headers = { - "Content-Type": "application/json", - "Ocp-Apim-Subscription-Key": KibbleBit.config["azure"]["apikey"], - } - - js = {"documents": []} - - # For each body... - a = 0 - KPEs = [] - for body in bodies: - # Crop out quotes - body = trimBody(body) - doc = {"language": "en", "id": str(a), "text": body} - js["documents"].append(doc) - KPEs.append({}) # placeholder for each doc, to be replaced - a += 1 - try: - rv = requests.post( - "https://%s.api.cognitive.microsoft.com/text/analytics/v2.0/keyPhrases" - % KibbleBit.config["azure"]["location"], - headers=headers, - data=json.dumps(js), - ) - jsout = rv.json() - except: - jsout = {} # borked sentiment analysis? - - if "documents" in jsout and len(jsout["documents"]) > 0: - for doc in jsout["documents"]: - KPEs[int(doc["id"])] = doc["keyPhrases"][ - :5 - ] # Replace KPEs[X] with the actual phrases, 5 first ones. - - else: - KibbleBit.pprint("Failed to analyze email body.") - print(jsout) - # Depending on price tier, Azure will return a 429 if you go too fast. - # If we see a statusCode return, let's just stop for now. - # Later scans can pick up the slack. - if "statusCode" in jsout: - KibbleBit.pprint("Possible rate limiting in place, stopping for now.") - return False - return KPEs - - -def picoKPE(KibbleBit, bodies): + headers = { + "Content-Type": "application/json", + "Ocp-Apim-Subscription-Key": conf.get("azure", "apikey"), + } + + js = {"documents": []} + + # For each body... + a = 0 + KPEs = [] + for body in bodies: + # Crop out quotes + body = trim_body(body) + doc = {"language": "en", "id": str(a), "text": body} + js["documents"].append(doc) + KPEs.append({}) # placeholder for each doc, to be replaced + a += 1 + try: + rv = requests.post( + "https://%s.api.cognitive.microsoft.com/text/analytics/v2.0/keyPhrases" + % conf.get("azure", "location"), + headers=headers, + data=json.dumps(js), + ) + jsout = rv.json() + except: + jsout = {} # borked sentiment analysis? + + if "documents" in jsout and len(jsout["documents"]) > 0: + for doc in jsout["documents"]: + KPEs[int(doc["id"])] = doc["keyPhrases"][ + :5 + ] # Replace KPEs[X] with the actual phrases, 5 first ones. + + else: + kibble_bit.pprint("Failed to analyze email body.") + print(jsout) + # Depending on price tier, Azure will return a 429 if you go too fast. 
+    # If we see a statusCode return, let's just stop for now.
+    # Later scans can pick up the slack.
+    if "statusCode" in jsout:
+        kibble_bit.pprint("Possible rate limiting in place, stopping for now.")
+        return False
+    return KPEs
+
+
+def pico_kpe(kibble_bit, bodies):
     """ KPE using picoAPI Text Analysis """
-    if "picoapi" in KibbleBit.config:
-        headers = {
-            "Content-Type": "application/json",
-            "PicoAPI-Key": KibbleBit.config["picoapi"]["key"],
-        }
-
-        js = {"texts": []}
-
-        # For each body...
-        a = 0
-        KPEs = []
-        for body in bodies:
-            body = trimBody(body)
-
-            doc = {"id": str(a), "body": body}
-            js["texts"].append(doc)
-            KPEs.append({})  # placeholder for each doc, to be replaced
-            a += 1
-        try:
-            rv = requests.post(
-                "https://v1.picoapi.com/api/text/keyphrase",
-                headers=headers,
-                data=json.dumps(js),
-            )
-            jsout = rv.json()
-        except:
-            jsout = {}  # borked sentiment analysis?
-
-        if "results" in jsout and len(jsout["results"]) > 0:
-            for doc in jsout["results"]:
-                phrases = []
-                # This is a bit different than Azure, in that it has a weighting score
-                # So we need to just extract key phrases above a certain level.
-                # Grab up o 5 key phrases per text
-                MINIMUM_WEIGHT = 0.02
-                for element in doc["keyphrases"]:
-                    if element["score"] > MINIMUM_WEIGHT:
-                        phrases.append(element["phrase"])
-                    if len(phrases) == 5:
-                        break
-                KPEs[
-                    int(doc["id"])
-                ] = phrases  # Replace KPEs[X] with the actual phrases
-
-        else:
-            KibbleBit.pprint("Failed to analyze email body.")
-            print(jsout)
-            # 403 returned on invalid key, 429 on rate exceeded.
-            # If we see a code return, let's just stop for now.
-            # Later scans can pick up the slack.
-            if "code" in jsout:
-                KibbleBit.pprint("Possible rate limiting in place, stopping for now.")
-                return False
-        return KPEs
+    headers = {
+        "Content-Type": "application/json",
+        "PicoAPI-Key": conf.get("picoapi", "key"),
+    }
+
+    js = {"texts": []}
+
+    # For each body...
+    a = 0
+    KPEs = []
+    for body in bodies:
+        body = trim_body(body)
+
+        doc = {"id": str(a), "body": body}
+        js["texts"].append(doc)
+        KPEs.append({})  # placeholder for each doc, to be replaced
+        a += 1
+    try:
+        rv = requests.post(
+            "https://v1.picoapi.com/api/text/keyphrase",
+            headers=headers,
+            data=json.dumps(js),
+        )
+        jsout = rv.json()
+    except:
+        jsout = {}  # borked sentiment analysis?
+
+    if "results" in jsout and len(jsout["results"]) > 0:
+        for doc in jsout["results"]:
+            phrases = []
+            # This is a bit different than Azure, in that it has a weighting score
+            # So we need to just extract key phrases above a certain level.
+            # Grab up to 5 key phrases per text
+            MINIMUM_WEIGHT = 0.02
+            for element in doc["keyphrases"]:
+                if element["score"] > MINIMUM_WEIGHT:
+                    phrases.append(element["phrase"])
+                if len(phrases) == 5:
+                    break
+            KPEs[int(doc["id"])] = phrases  # Replace KPEs[X] with the actual phrases
+
+    else:
+        kibble_bit.pprint("Failed to analyze email body.")
+        print(jsout)
+        # 403 returned on invalid key, 429 on rate exceeded.
+        # If we see a code return, let's just stop for now.
+        # Later scans can pick up the slack.
+        if "code" in jsout:
+            kibble_bit.pprint("Possible rate limiting in place, stopping for now.")
+            return False
+    return KPEs
diff --git a/kibble/scanners/utils/tone.py b/kibble/scanners/utils/tone.py
index c920b5ed..df480a40 100644
--- a/kibble/scanners/utils/tone.py
+++ b/kibble/scanners/utils/tone.py
@@ -18,12 +18,12 @@
 """ This is an experimental tone analyzer plugin for using Watson/BlueMix
 for analyzing the mood of email on a list.
This requires a Watson account -and a watson section in config.yaml, as such: +and a watson section in kibble.ini, as such: -watson: - username: $user - password: $pass - api: https://$something.watsonplatform.net/tone-analyzer/api +[watson] +username = $user +password = $pass +api = https://$something.watsonplatform.net/tone-analyzer/api Currently only pony mail is supported. more to come. """ @@ -32,160 +32,159 @@ import requests +from kibble.configuration import conf -def watsonTone(KibbleBit, bodies): - """ Sentiment analysis using IBM Watson """ - if "watson" in KibbleBit.config: - headers = {"Content-Type": "application/json"} - # Crop out quotes - for body in bodies: - lines = body.split("\n") - body = "\n".join([x for x in lines if not x.startswith(">")]) - - js = {"text": body} - try: - rv = requests.post( - "%s/v3/tone?version=2017-09-21&sentences=false" - % KibbleBit.config["watson"]["api"], - headers=headers, - data=json.dumps(js), - auth=( - KibbleBit.config["watson"]["username"], - KibbleBit.config["watson"]["password"], - ), - ) - jsout = rv.json() - except: - jsout = {} # borked Watson? - mood = {} - if "document_tone" in jsout: - for tone in jsout["document_tone"]["tones"]: - mood[tone["tone_id"]] = tone["score"] - else: - KibbleBit.pprint("Failed to analyze email body.") - yield mood +def watson_tone(kibble_bit, bodies): + """ Sentiment analysis using IBM Watson """ + headers = {"Content-Type": "application/json"} + # Crop out quotes + for body in bodies: + lines = body.split("\n") + body = "\n".join([x for x in lines if not x.startswith(">")]) -def azureTone(KibbleBit, bodies): - """ Sentiment analysis using Azure Text Analysis API """ - if "azure" in KibbleBit.config: - headers = { - "Content-Type": "application/json", - "Ocp-Apim-Subscription-Key": KibbleBit.config["azure"]["apikey"], - } - - js = {"documents": []} - - # For each body... - a = 0 - moods = [] - for body in bodies: - # Crop out quotes - lines = body.split("\n") - body = "\n".join([x for x in lines if not x.startswith(">")]) - doc = {"language": "en", "id": str(a), "text": body} - js["documents"].append(doc) - moods.append({}) # placeholder for each doc, to be replaced - a += 1 + js = {"text": body} try: rv = requests.post( - "https://%s.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment" - % KibbleBit.config["azure"]["location"], + "%s/v3/tone?version=2017-09-21&sentences=false" + % conf.get("watson", "api"), headers=headers, data=json.dumps(js), + auth=( + conf.get("watson", "username"), + conf.get("watson", "password"), + ), ) jsout = rv.json() except: - jsout = {} # borked sentiment analysis? - - if "documents" in jsout and len(jsout["documents"]) > 0: - for doc in jsout["documents"]: - mood = {} - # This is more parred than Watson, so we'll split it into three groups: positive, neutral and negative. - # Divide into four segments, 0->40%, 25->75% and 60->100%. - # 0-40 promotes negative, 60-100 promotes positive, and 25-75% promotes neutral. - # As we don't want to over-represent negative/positive where the results are - # muddy, the neutral zone is larger than the positive/negative zones by 10%. - val = doc["score"] - mood["negative"] = max( - 0, ((0.4 - val) * 2.5) - ) # For 40% and below, use 2½ distance - mood["positive"] = max( - 0, ((val - 0.6) * 2.5) - ) # For 60% and above, use 2½ distance - mood["neutral"] = max( - 0, 1 - (abs(val - 0.5) * 2) - ) # Between 25% and 75% use double the distance to middle. 
-                moods[int(doc["id"])] = mood  # Replace moods[X] with the actual mood
-
+            jsout = {}  # borked Watson?
+        mood = {}
+        if "document_tone" in jsout:
+            for tone in jsout["document_tone"]["tones"]:
+                mood[tone["tone_id"]] = tone["score"]
         else:
-            KibbleBit.pprint("Failed to analyze email body.")
-            print(jsout)
-            # Depending on price tier, Azure will return a 429 if you go too fast.
-            # If we see a statusCode return, let's just stop for now.
-            # Later scans can pick up the slack.
-            if "statusCode" in jsout:
-                KibbleBit.pprint("Possible rate limiting in place, stopping for now.")
-                return False
-        return moods
-
-
-def picoTone(KibbleBit, bodies):
-    """ Sentiment analysis using picoAPI Text Analysis """
-    if "picoapi" in KibbleBit.config:
-        headers = {
-            "Content-Type": "application/json",
-            "PicoAPI-Key": KibbleBit.config["picoapi"]["key"],
-        }
-
-        js = {"texts": []}
-
-        # For each body...
-        a = 0
-        moods = []
-        for body in bodies:
-            # Crop out quotes
-            lines = body.split("\n")
-            body = "\n".join([x for x in lines if not x.startswith(">")])
-            doc = {"id": str(a), "body": body}
-            js["texts"].append(doc)
-            moods.append({})  # placeholder for each doc, to be replaced
-            a += 1
-        try:
-            rv = requests.post(
-                "https://v1.picoapi.com/api/text/sentiment",
-                headers=headers,
-                data=json.dumps(js),
-            )
-            jsout = rv.json()
-        except:
-            jsout = {}  # borked sentiment analysis?
+            kibble_bit.pprint("Failed to analyze email body.")
+        yield mood

-        if "results" in jsout and len(jsout["results"]) > 0:
-            for doc in jsout["results"]:
-                mood = {
-                    "negative": doc["negativity"],
-                    "positive": doc["positivity"],
-                    "neutral": doc["neutrality"],
-                }
-                # Sentiment is the overall score, and we use that for the neutrality of a text

+def azure_tone(kibble_bit, bodies):
+    """ Sentiment analysis using Azure Text Analysis API """
+    headers = {
+        "Content-Type": "application/json",
+        "Ocp-Apim-Subscription-Key": conf.get("azure", "apikey"),
+    }
+
+    js = {"documents": []}

-                # Additional (optional) emotion weighting
-                if "emotions" in doc:
-                    for k, v in doc["emotions"].items():
-                        mood[k] = v / 100  # Value is between 0 and 100.
+    # For each body...
+    a = 0
+    moods = []
+    for body in bodies:
+        # Crop out quotes
+        lines = body.split("\n")
+        body = "\n".join([x for x in lines if not x.startswith(">")])
+        doc = {"language": "en", "id": str(a), "text": body}
+        js["documents"].append(doc)
+        moods.append({})  # placeholder for each doc, to be replaced
+        a += 1
+    try:
+        rv = requests.post(
+            "https://%s.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment"
+            % conf.get("azure", "location"),
+            headers=headers,
+            data=json.dumps(js),
+        )
+        jsout = rv.json()
+    except:
+        jsout = {}  # borked sentiment analysis?
+
+    if "documents" in jsout and len(jsout["documents"]) > 0:
+        for doc in jsout["documents"]:
+            mood = {}
+            # This is more pared down than Watson, so we'll split it into three groups: positive, neutral and negative.
+            # Divide into three segments, 0->40%, 25->75% and 60->100%.
+            # 0-40 promotes negative, 60-100 promotes positive, and 25-75% promotes neutral.
+            # As we don't want to over-represent negative/positive where the results are
+            # muddy, the neutral zone is larger than the positive/negative zones by 10%.
+            val = doc["score"]
+            mood["negative"] = max(
+                0, ((0.4 - val) * 2.5)
+            )  # For 40% and below, use 2½ distance
+            mood["positive"] = max(
+                0, ((val - 0.6) * 2.5)
+            )  # For 60% and above, use 2½ distance
+            mood["neutral"] = max(
+                0, 1 - (abs(val - 0.5) * 2)
+            )  # Between 25% and 75% use double the distance to middle.
+            moods[int(doc["id"])] = mood  # Replace moods[X] with the actual mood
+
+    else:
+        kibble_bit.pprint("Failed to analyze email body.")
+        print(jsout)
+        # Depending on price tier, Azure will return a 429 if you go too fast.
+        # If we see a statusCode return, let's just stop for now.
+        # Later scans can pick up the slack.
+        if "statusCode" in jsout:
+            kibble_bit.pprint("Possible rate limiting in place, stopping for now.")
+            return False
+    return moods
+
+
+def pico_tone(kibble_bit, bodies):
+    """ Sentiment analysis using picoAPI Text Analysis """
+    headers = {
+        "Content-Type": "application/json",
+        "PicoAPI-Key": conf.get("picoapi", "key"),
+    }
-                moods[int(doc["id"])] = mood  # Replace moods[X] with the actual mood
+    js = {"texts": []}
-        else:
-            KibbleBit.pprint("Failed to analyze email body.")
-            print(jsout)
-            # 403 returned on invalid key, 429 on rate exceeded.
-            # If we see a code return, let's just stop for now.
-            # Later scans can pick up the slack.
-            if "code" in jsout:
-                KibbleBit.pprint("Possible rate limiting in place, stopping for now.")
-                return False
-        return moods
+    # For each body...
+    a = 0
+    moods = []
+    for body in bodies:
+        # Crop out quotes
+        lines = body.split("\n")
+        body = "\n".join([x for x in lines if not x.startswith(">")])
+        doc = {"id": str(a), "body": body}
+        js["texts"].append(doc)
+        moods.append({})  # placeholder for each doc, to be replaced
+        a += 1
+    try:
+        rv = requests.post(
+            "https://v1.picoapi.com/api/text/sentiment",
+            headers=headers,
+            data=json.dumps(js),
+        )
+        jsout = rv.json()
+    except:
+        jsout = {}  # borked sentiment analysis?
+
+    if "results" in jsout and len(jsout["results"]) > 0:
+        for doc in jsout["results"]:
+            mood = {
+                "negative": doc["negativity"],
+                "positive": doc["positivity"],
+                "neutral": doc["neutrality"],
+            }
+
+            # Sentiment is the overall score, and we use that for the neutrality of a text
+
+            # Additional (optional) emotion weighting
+            if "emotions" in doc:
+                for k, v in doc["emotions"].items():
+                    mood[k] = v / 100  # Value is between 0 and 100.
+
+            moods[int(doc["id"])] = mood  # Replace moods[X] with the actual mood
+
+    else:
+        kibble_bit.pprint("Failed to analyze email body.")
+        print(jsout)
+        # 403 returned on invalid key, 429 on rate exceeded.
+        # If we see a code return, let's just stop for now.
+        # Later scans can pick up the slack.
+        if "code" in jsout:
+            kibble_bit.pprint("Possible rate limiting in place, stopping for now.")
+            return False
+    return moods
diff --git a/kibble/settings.py b/kibble/settings.py
index d165bb21..b9c40039 100644
--- a/kibble/settings.py
+++ b/kibble/settings.py
@@ -17,6 +17,8 @@
 import os
 
+from kibble.configuration import conf
+
 YAML_DIRECTORY = os.path.join(
     os.path.dirname(os.path.realpath(__file__)), "api", "yaml"
 )
@@ -24,3 +26,7 @@
 MAPPING_DIRECTORY = os.path.join(
     os.path.dirname(os.path.realpath(__file__)), "mappings"
 )
+
+WATSON_ENABLED = bool(conf.get("watson", "username", fallback=None))
+AZURE_ENABLED = bool(conf.get("azure", "apikey", fallback=None))
+PICOAPI_ENABLED = bool(conf.get("picoapi", "key", fallback=None))

From 96959acec06fed4d91d5da73fee1aa1200ffbb3c Mon Sep 17 00:00:00 2001
From: Tomek Urbaszek
Date: Wed, 16 Dec 2020 23:04:45 +0000
Subject: [PATCH 46/48] Introduce a BaseScanner class (#121)

This will help us refactor and standardize all scanners. Additionally,
this class creates a good place for common methods which are currently
duplicated in many places.
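To illustrate the intended pattern (a sketch only, not part of the patch: `ExampleScanner` and its log message are hypothetical), a concrete scanner would subclass the new base class, fill in the `title`/`version` metadata, and implement `scan()`:

    import logging
    from abc import abstractmethod


    class BaseScanner:
        # Condensed mirror of the base class added below
        version: str
        title: str
        log = logging.getLogger(__name__)

        @abstractmethod
        def scan(self, kibble_bit, source: dict) -> None:
            raise NotImplementedError


    class ExampleScanner(BaseScanner):
        version = "0.1.0"
        title = "Example scanner"

        def scan(self, kibble_bit, source: dict) -> None:
            # Real scanners would inspect the source dict and push documents
            self.log.info("Scanning %s", source.get("sourceURL"))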
--- kibble/scanners/scanners/base_scanner.py | 35 ++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 kibble/scanners/scanners/base_scanner.py diff --git a/kibble/scanners/scanners/base_scanner.py b/kibble/scanners/scanners/base_scanner.py new file mode 100644 index 00000000..9f52a082 --- /dev/null +++ b/kibble/scanners/scanners/base_scanner.py @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import logging +from abc import abstractmethod + +from kibble.scanners.brokers.kibbleES import KibbleBit + + +class BaseScanner: + """ + Base scanner class. All scanners should inherit from it. + """ + + version: str + title: str + log = logging.getLogger(__name__) + + @abstractmethod + def scan(self, kibble_bit: KibbleBit, source: dict) -> None: + raise NotImplementedError From dee60e4a6c218f5d559af584b28c8dfb3a1b5d3e Mon Sep 17 00:00:00 2001 From: Tomasz Urbaszek Date: Thu, 17 Dec 2020 01:14:07 +0100 Subject: [PATCH 47/48] Refactor sloc scanner to be a class --- kibble/scanners/scanners/base_scanner.py | 11 ++++- kibble/scanners/scanners/git-sloc.py | 61 +++++++++++++----------- 2 files changed, 42 insertions(+), 30 deletions(-) diff --git a/kibble/scanners/scanners/base_scanner.py b/kibble/scanners/scanners/base_scanner.py index 9f52a082..b96ed91d 100644 --- a/kibble/scanners/scanners/base_scanner.py +++ b/kibble/scanners/scanners/base_scanner.py @@ -30,6 +30,15 @@ class BaseScanner: title: str log = logging.getLogger(__name__) + def __init__(self, kibble_bit: KibbleBit, source: dict): + self.kibble_bit = kibble_bit + self.source = source + + @abstractmethod + @property + def accepts(self) -> bool: + raise NotImplementedError + @abstractmethod - def scan(self, kibble_bit: KibbleBit, source: dict) -> None: + def scan(self) -> None: raise NotImplementedError diff --git a/kibble/scanners/scanners/git-sloc.py b/kibble/scanners/scanners/git-sloc.py index 4ae13ac4..76d1c2b9 100644 --- a/kibble/scanners/scanners/git-sloc.py +++ b/kibble/scanners/scanners/git-sloc.py @@ -20,35 +20,39 @@ import time from kibble.configuration import conf +from kibble.scanners.scanners.base_scanner import BaseScanner from kibble.scanners.utils import git, sloc -""" Source Lines of Code counter for Git """ -title = "SloC Counter for Git" -version = "0.1.0" +class GitSlocScanner(BaseScanner): + """Source Lines of Code counter for Git""" + title = "SloC Counter for Git" + version = "0.1.0" -def accepts(source): - """ Do we accept this source? """ - if source["type"] == "git": - return True - # There are cases where we have a github repo, but don't wanna analyze the code, just issues - if source["type"] == "github" and source.get("issuesonly", False) == False: - return True - return False + @property + def accepts(self): + """ Do we accept this source? 
""" + if self.source["type"] == "git": + return True + # There are cases where we have a github repo, but don't wanna analyze the code, just issues + if self.source["type"] == "github" and self.source.get("issuesonly"): + return True + return False + def scan(self): + source = self.source + source_id = source["sourceID"] -def scan(kibble_bit, source): + url = source["sourceURL"] + root_path = ( + f'{conf.get("scanner", "scratchdir")}/{source["organisation"]}/{git}' + ) + gpath = os.path.join(root_path, source_id) - rid = source["sourceID"] - url = source["sourceURL"] - rootpath = "%s/%s/git" % ( - conf.get("scanner", "scratchdir"), - source["organisation"], - ) - gpath = os.path.join(rootpath, rid) + if not source["steps"]["sync"]["good"] or not os.path.exists(gpath): + return - if source["steps"]["sync"]["good"] and os.path.exists(gpath): source["steps"]["count"] = { "time": time.time(), "status": "SLoC count started at " @@ -56,28 +60,27 @@ def scan(kibble_bit, source): "running": True, "good": True, } - kibble_bit.update_source(source) + self.kibble_bit.update_source(source) try: branch = git.default_branch(source, gpath) subprocess.call("cd %s && git checkout %s" % (gpath, branch), shell=True) except: # pylint: disable=bare-except - kibble_bit.pprint("SLoC counter failed to find main branch for %s!!" % url) + self.log.error("SLoC counter failed to find main branch for %s", url) return False - kibble_bit.pprint("Running SLoC count for %s" % url) - languages, codecount, comment, blank, years, cost = sloc.count(gpath) + self.log.info("Running SLoC count for %s", url) + languages, code_count, comment, blank, years, cost = sloc.count(gpath) - sloc_ = { - "sourceID": source["sourceID"], - "loc": codecount, + source["sloc"] = { + "sourceID": source_id, + "loc": code_count, "comments": comment, "blanks": blank, "years": years, "cost": cost, "languages": languages, } - source["sloc"] = sloc_ source["steps"]["count"] = { "time": time.time(), "status": "SLoC count completed at " @@ -85,4 +88,4 @@ def scan(kibble_bit, source): "running": False, "good": True, } - kibble_bit.update_source(source) + self.kibble_bit.update_source(source) From a3a7a8341d969f8632d386207b124cdb332f993e Mon Sep 17 00:00:00 2001 From: Tomasz Urbaszek Date: Fri, 18 Dec 2020 19:48:35 +0100 Subject: [PATCH 48/48] fixup! Refactor sloc scanner to be a class --- NOTICE | 27 +++++++++++ kibble.ini | 2 +- kibble/cli/scanner_command.py | 61 +++++++++++++++--------- kibble/scanners/brokers/kibbleES.py | 9 ++-- kibble/scanners/scanners/__init__.py | 19 ++++++-- kibble/scanners/scanners/base_scanner.py | 9 ++-- kibble/scanners/scanners/git-sloc.py | 17 +++++-- kibble/scanners/scanners/github-stats.py | 4 +- setup.py | 1 + 9 files changed, 105 insertions(+), 44 deletions(-) diff --git a/NOTICE b/NOTICE index 66e7dc12..7ab700c5 100644 --- a/NOTICE +++ b/NOTICE @@ -186,3 +186,30 @@ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +------------------------------------------------------------------------ +Loguru (MIT License) +------------------------------------------------------------------------ + +MIT License + +Copyright (c) 2017 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/kibble.ini b/kibble.ini index 8e4f042a..0f7c5347 100644 --- a/kibble.ini +++ b/kibble.ini @@ -23,7 +23,7 @@ scratchdir = /tmp # how many nodes are working, and which one you are, # using the format: $nodeNo/$totalNodes. If there are 4 nodes, # each node will gat 1/4th of all jobs to work on. -balance = +balance = 1/1 [git] # Comma-separated branch names diff --git a/kibble/cli/scanner_command.py b/kibble/cli/scanner_command.py index 2388c0ce..d08181b2 100644 --- a/kibble/cli/scanner_command.py +++ b/kibble/cli/scanner_command.py @@ -14,12 +14,14 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. - import multiprocessing import threading import time +from inspect import isclass from typing import List +from loguru import logger + from kibble.configuration import conf from kibble.scanners.brokers import kibbleES @@ -27,7 +29,7 @@ BIG_LOCK = threading.Lock() -def is_mine(id_): +def is_mine(id_: str): balance = conf.get("scanner", "balance") if not balance: return False @@ -55,7 +57,7 @@ def __init__(self, broker, org, i, t=None, e=None): self.bit = self.broker.bitClass(self.broker, self.org, i) self.stype = t self.exclude = e - print("Initialized thread %i" % i) + logger.info("Initialized thread {}", i) def run(self): from kibble.scanners import scanners @@ -67,25 +69,40 @@ def run(self): BIG_LOCK.acquire(blocking=True) try: # Try grabbing an object (might not be any left!) - obj = PENDING_OBJECTS.pop(0) + try: + obj = PENDING_OBJECTS.pop(0) + except IndexError: + break + # If load balancing jobs, make sure this one is ours - if is_mine(obj["sourceID"]): - # Run through list of scanners in order, apply when useful - for sid, scanner in scanners.enumerate(): - if scanner.accepts(obj): - self.bit.pluginname = "plugins/scanners/" + sid - # Excluded scanner type? - if self.exclude and sid in self.exclude: - continue - # Specific scanner type or no types mentioned? 
- if not self.stype or self.stype == sid: - scanner.scan(self.bit, obj) - except: - break + if not is_mine(obj["sourceID"]): + continue + # Run through list of scanners in order, apply when useful + for sid, scanner_class_or_mod in scanners.enumerate(): + if self.exclude and sid in self.exclude: + continue + + # Specific scanner type or no types mentioned? + if self.stype and self.stype != sid: + continue + + self.bit.pluginname = "plugins/scanners/" + sid + + if isclass(scanner_class_or_mod): + scanner = scanner_class_or_mod(kibble_bit=self.bit, source=obj) + logger.info("Doing scan for {}", scanner.title) + if scanner.accepts: + scanner.scan() + else: + logger.info("Doing scan for {}", scanner_class_or_mod.title) + if scanner_class_or_mod.accepts(obj): + scanner_class_or_mod.scan(self.bit, obj) + except Exception: + logger.exception("An error occurred when scanning.") finally: BIG_LOCK.release() self.bit.pluginname = "core" - self.bit.pprint("No more objects, exiting!") + logger.info("No more objects, exiting!") def scan_cmd( @@ -98,15 +115,15 @@ def scan_cmd( ): global PENDING_OBJECTS - print("Kibble Scanner starting") - print("Using direct ElasticSearch broker model") + logger.info("Kibble Scanner starting") + logger.info("Using direct ElasticSearch broker model") broker = kibbleES.Broker() org_no = 0 source_no = 0 for org_item in broker.organisations(): if not org or org == org_item.id: - print(f"Processing organisation {org_item.id}") + logger.info(f"Processing organisation {org_item.id}") org_no += 1 # Compile source list @@ -145,6 +162,6 @@ def scan_cmd( for t in threads: t.join() - print( + logger.info( f"All done scanning for now, found {org_no} organisations and {source_no} sources to process." ) diff --git a/kibble/scanners/brokers/kibbleES.py b/kibble/scanners/brokers/kibbleES.py index 5ee92d29..b72804e0 100644 --- a/kibble/scanners/brokers/kibbleES.py +++ b/kibble/scanners/brokers/kibbleES.py @@ -20,6 +20,7 @@ import elasticsearch import elasticsearch.helpers +from loguru import logger from kibble.configuration import conf @@ -125,7 +126,7 @@ def __init__(self, broker, organisation, tid): self.queueMax = 1000 # Entries to keep before bulk pushing self.pluginname = "" self.tid = tid - self.dbname = conf.get("elasticsearch", "database") + self.dbname = conf.get("elasticsearch", "dbname") def __del__(self): """ On unload/delete, push the last chunks of data to ES """ @@ -136,9 +137,9 @@ def __del__(self): def pprint(self, string, err=False): line = "[thread#%i:%s]: %s" % (self.tid, self.pluginname, string) if err: - sys.stderr.write(line + "\n") + logger.warning(line) else: - print(line) + logger.info(line) def update_source(self, source): """ Updates a source document, usually with a status update """ @@ -232,7 +233,7 @@ def __init__(self, broker, org): self.broker = broker self.id = org - self.dbname = conf.get("elasticsearch", "database") + self.dbname = conf.get("elasticsearch", "dbname") def sources(self, sourceType=None, view=None): """ Get all sources or sources of a specific type for an org """ diff --git a/kibble/scanners/scanners/__init__.py b/kibble/scanners/scanners/__init__.py index 771ba57d..b1f929e9 100644 --- a/kibble/scanners/scanners/__init__.py +++ b/kibble/scanners/scanners/__init__.py @@ -22,6 +22,8 @@ import importlib +from loguru import logger + # Define, in order of priority, all scanner plugins we have __all__ = [ "git-sync", # This needs to precede other VCS scanners! 
@@ -46,12 +48,19 @@
 
 scanners = {}
 for p in __all__:
-    scanner = importlib.import_module("kibble.scanners.scanners.%s" % p)
+    scanner_mod = importlib.import_module("kibble.scanners.scanners.%s" % p)
+    # New-style scanners expose a class through the module's `scanner` attribute
+    if hasattr(scanner_mod, "scanner"):
+        scanner = getattr(scanner_mod, "scanner")
+    else:
+        scanner = scanner_mod
+
     scanners[p] = scanner
-    # This should ideally be pprint, meh
-    print(
-        "[core]: Loaded plugins/scanners/%s v/%s (%s)"
-        % (p, scanner.version, scanner.title)
+    logger.info(
+        "[core]: Loaded plugins/scanners/{} v/{} ({})",
+        p,
+        scanner.version,
+        scanner.title,
     )
diff --git a/kibble/scanners/scanners/base_scanner.py b/kibble/scanners/scanners/base_scanner.py
index b96ed91d..3292f751 100644
--- a/kibble/scanners/scanners/base_scanner.py
+++ b/kibble/scanners/scanners/base_scanner.py
@@ -15,9 +15,10 @@
 # specific language governing permissions and limitations
 # under the License.
 
-import logging
 from abc import abstractmethod
 
+from loguru import logger
+
 from kibble.scanners.brokers.kibbleES import KibbleBit
@@ -28,17 +29,17 @@ class BaseScanner:
 
     version: str
     title: str
-    log = logging.getLogger(__name__)
+    log = logger
 
     def __init__(self, kibble_bit: KibbleBit, source: dict):
        self.kibble_bit = kibble_bit
        self.source = source
 
-    @abstractmethod
     @property
+    @abstractmethod
     def accepts(self) -> bool:
         raise NotImplementedError
-
+
     @abstractmethod
     def scan(self) -> None:
         raise NotImplementedError
diff --git a/kibble/scanners/scanners/git-sloc.py b/kibble/scanners/scanners/git-sloc.py
index 76d1c2b9..66802c4c 100644
--- a/kibble/scanners/scanners/git-sloc.py
+++ b/kibble/scanners/scanners/git-sloc.py
@@ -45,12 +45,16 @@ def scan(self):
         source_id = source["sourceID"]
 
         url = source["sourceURL"]
-        root_path = (
-            f'{conf.get("scanner", "scratchdir")}/{source["organisation"]}/{git}'
-        )
+        root_path = f'{conf.get("scanner", "scratchdir")}/{source["organisation"]}/git'
         gpath = os.path.join(root_path, source_id)
 
-        if not source["steps"]["sync"]["good"] or not os.path.exists(gpath):
+        steps = source["steps"]
+        if "sync" in steps and not steps["sync"]["good"]:
+            self.log.warning("Scanning skipped")
+            return
+
+        if not os.path.exists(gpath):
+            self.log.warning("Scanning skipped, path '{}' does not exist", gpath)
             return
 
         source["steps"]["count"] = {
@@ -69,7 +73,7 @@ def scan(self):
             self.log.error("SLoC counter failed to find main branch for %s", url)
             return False
 
-        self.log.info("Running SLoC count for %s", url)
+        self.log.info("Running SLoC count for {}", url)
         languages, code_count, comment, blank, years, cost = sloc.count(gpath)
 
         source["sloc"] = {
@@ -89,3 +93,6 @@ def scan(self):
             "good": True,
         }
         self.kibble_bit.update_source(source)
+
+
+scanner = GitSlocScanner
diff --git a/kibble/scanners/scanners/github-stats.py b/kibble/scanners/scanners/github-stats.py
index ed4eda14..267d55bc 100644
--- a/kibble/scanners/scanners/github-stats.py
+++ b/kibble/scanners/scanners/github-stats.py
@@ -27,9 +27,7 @@
 def accepts(source):
     """ Do we accept this source? """
-    if source["type"] == "github":
-        return True
-    return False
+    return source["type"] == "github"
 
 
 def get_time(string):
diff --git a/setup.py b/setup.py
index 044da496..aa26d814 100644
--- a/setup.py
+++ b/setup.py
@@ -36,6 +36,7 @@
     "click==7.1.2",
     "elasticsearch==7.9.1",
     "gunicorn==20.0.4",
+    "loguru==0.5.3",
     "psutil==5.7.3",
     "python-dateutil==2.8.1",
     "python-twitter==3.5",
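The `balance = 1/1` default added to `kibble.ini` above feeds the `is_mine()` check in `scanner_command.py`. The body of that function is not shown in this series, so the following is only a sketch of how a `$nodeNo/$totalNodes` split could work; the md5 bucketing is an assumption, not the actual implementation:

    import hashlib

    def is_mine(id_: str, balance: str) -> bool:
        # No balance configured: claim nothing, matching the early return above
        if not balance:
            return False
        node_no, total_nodes = (int(part) for part in balance.split("/"))
        # Stable bucket per source ID, so each node owns a disjoint 1/N slice
        bucket = int(hashlib.md5(id_.encode("utf-8")).hexdigest(), 16) % total_nodes
        return bucket == node_no - 1

    assert is_mine("some-source-id", "1/1")  # a single node owns every job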
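With that in place, the loader accepts two scanner shapes: legacy modules exposing free accepts()/scan() functions, and classes exported through a module-level `scanner` attribute. A condensed sketch of the dispatch performed in `scanner_command.py` (`run_scanner` is an illustrative name, not the actual function):

    from inspect import isclass

    def run_scanner(scanner_class_or_mod, kibble_bit, obj: dict) -> None:
        if isclass(scanner_class_or_mod):
            # New-style: instantiate, then consult the accepts property
            scanner = scanner_class_or_mod(kibble_bit=kibble_bit, source=obj)
            if scanner.accepts:
                scanner.scan()
        else:
            # Legacy module: accepts() and scan() are module-level functions
            if scanner_class_or_mod.accepts(obj):
                scanner_class_or_mod.scan(kibble_bit, obj)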