diff --git a/datadog_checks_base/tests/test_metadata.py b/datadog_checks_base/tests/test_metadata.py
index f2de6cd886172..8e028fed1721f 100644
--- a/datadog_checks_base/tests/test_metadata.py
+++ b/datadog_checks_base/tests/test_metadata.py
@@ -59,8 +59,8 @@ def test_encoding(self):
         constructor = ensure_bytes
         finalizer = ensure_unicode

-        name = constructor(u'nam\u00E9')
-        value = constructor(u'valu\u00E9')
+        name = constructor(u'nam\u00e9')
+        value = constructor(u'valu\u00e9')

         with mock.patch(SET_CHECK_METADATA_METHOD) as m:
             check.set_metadata(name, value)
diff --git a/datadog_checks_dev/datadog_checks/dev/plugin/pytest.py b/datadog_checks_dev/datadog_checks/dev/plugin/pytest.py
index f9eaaf19b38a0..6e5989b2ab0e0 100644
--- a/datadog_checks_dev/datadog_checks/dev/plugin/pytest.py
+++ b/datadog_checks_dev/datadog_checks/dev/plugin/pytest.py
@@ -320,7 +320,7 @@ def enum_object_items(data_source, machine_name, object_name, detail_level):
         counters = {}
         for object_name, data in perf_objects.items():
             instances, counter_values = data
-            instance_counts = {instance_name: 0 for instance_name in instances}
+            instance_counts = dict.fromkeys(instances, 0)
             instance_indices = []
             for instance_name in instances:
                 instance_indices.append(instance_counts[instance_name])
diff --git a/datadog_checks_dev/datadog_checks/dev/testing.py b/datadog_checks_dev/datadog_checks/dev/testing.py
index 437e873aaccd2..5fe0e66f3f692 100644
--- a/datadog_checks_dev/datadog_checks/dev/testing.py
+++ b/datadog_checks_dev/datadog_checks/dev/testing.py
@@ -2,8 +2,7 @@
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)

-""" This module contains test annotations
-"""
+"""This module contains test annotations"""
 import pytest

 from .utils import ON_MACOS, ON_WINDOWS
diff --git a/ddev/changelog.d/20312.added b/ddev/changelog.d/20312.added
new file mode 100644
index 0000000000000..1a197c8f33ac8
--- /dev/null
+++ b/ddev/changelog.d/20312.added
@@ -0,0 +1 @@
+Update style dependencies.
\ No newline at end of file
diff --git a/ddev/src/ddev/plugin/external/hatch/environment_collector.py b/ddev/src/ddev/plugin/external/hatch/environment_collector.py
index 56cbd1bfd298b..fe2e0b0e6a429 100644
--- a/ddev/src/ddev/plugin/external/hatch/environment_collector.py
+++ b/ddev/src/ddev/plugin/external/hatch/environment_collector.py
@@ -144,8 +144,8 @@ def get_initial_config(self):
             },
             # We pin deps in order to make CI more stable/reliable.
             'dependencies': [
-                'black==24.10.0',
-                'ruff==0.8.0',
+                'black==25.1.0',
+                'ruff==0.11.10',
                 # Keep in sync with: /datadog_checks_base/pyproject.toml
                 'pydantic==2.10.6',
             ],
diff --git a/dns_check/tests/mocks.py b/dns_check/tests/mocks.py
index 4d33ed376366f..3c5c33e0b2ef2 100644
--- a/dns_check/tests/mocks.py
+++ b/dns_check/tests/mocks.py
@@ -17,7 +17,7 @@ def __init__(self, address):
         else:
             items = [MockDNSAnswer.MockItem(address)]

-        self.items = {item: None for item in items}
+        self.items = dict.fromkeys(items)

     class MockItem:
         def __init__(self, address):
diff --git a/haproxy/datadog_checks/haproxy/legacy/haproxy.py b/haproxy/datadog_checks/haproxy/legacy/haproxy.py
index f34bd9de9dd90..5d6dfa84981ac 100644
--- a/haproxy/datadog_checks/haproxy/legacy/haproxy.py
+++ b/haproxy/datadog_checks/haproxy/legacy/haproxy.py
@@ -453,7 +453,7 @@ def _normalize_status(status):
         return formatted_status

     def _process_backend_hosts_metric(self, active_tag=None):
-        agg_statuses = defaultdict(lambda: {status: 0 for status in Services.COLLATED_STATUSES})
+        agg_statuses = defaultdict(lambda: dict.fromkeys(Services.COLLATED_STATUSES, 0))
         active_tag = [] if active_tag is None else active_tag

         for host_status, count in self.hosts_statuses.items():
@@ -493,7 +493,7 @@ def _process_status_metric(
         self,
         active_tag=None,
     ):
-        agg_statuses_counter = defaultdict(lambda: {status: 0 for status in Services.COLLATED_STATUSES})
+        agg_statuses_counter = defaultdict(lambda: dict.fromkeys(Services.COLLATED_STATUSES, 0))
         active_tag = [] if active_tag is None else active_tag
         # Initialize `statuses_counter`: every value is a defaultdict initialized with the correct
         # keys, which depends on the `collate_status_tags_per_host` option
diff --git a/http_check/tests/conftest.py b/http_check/tests/conftest.py
index 96f2e915836a4..f34387c09de12 100644
--- a/http_check/tests/conftest.py
+++ b/http_check/tests/conftest.py
@@ -43,7 +43,7 @@ def call_endpoint(url):


 @pytest.fixture(scope='session')
 def mock_local_http_dns():
-    mapping = {x: ('127.0.0.1', 443) for x in MOCKED_HOSTS}
+    mapping = dict.fromkeys(MOCKED_HOSTS, ('127.0.0.1', 443))
     with mock_local(mapping):
         yield
diff --git a/kubelet/datadog_checks/kubelet/kubelet.py b/kubelet/datadog_checks/kubelet/kubelet.py
index b028e067609c3..acc60f6d781aa 100644
--- a/kubelet/datadog_checks/kubelet/kubelet.py
+++ b/kubelet/datadog_checks/kubelet/kubelet.py
@@ -228,13 +228,13 @@ def __init__(self, name, init_config, instances):
         self.probes_scraper_config = self.get_scraper_config(probes_instance)

-        counter_transformers = {k: self.send_always_counter for k in self.COUNTER_METRICS}
+        counter_transformers = dict.fromkeys(self.COUNTER_METRICS, self.send_always_counter)

         histogram_transformers = {
             k: self._histogram_from_seconds_to_microseconds(v) for k, v in TRANSFORM_VALUE_HISTOGRAMS.items()
         }

-        volume_metric_transformers = {k: self.append_pod_tags_to_volume_metrics for k in self.VOLUME_METRICS}
+        volume_metric_transformers = dict.fromkeys(self.VOLUME_METRICS, self.append_pod_tags_to_volume_metrics)

         self.transformers = {}
         for d in [
diff --git a/kubelet/datadog_checks/kubelet/prometheus.py b/kubelet/datadog_checks/kubelet/prometheus.py
index fa2537a598226..5321809bb68b1 100644
--- a/kubelet/datadog_checks/kubelet/prometheus.py
+++ b/kubelet/datadog_checks/kubelet/prometheus.py
@@ -367,7 +367,7 @@ def _process_usage_metric(self, m_name, metric, cache, scraper_config, labels=No
             labels = []

         # track containers that still exist in the cache
-        seen_keys = {k: False for k in cache}
+        seen_keys = dict.fromkeys(cache, False)

         samples = self._sum_values_by_context(metric, self._get_entity_id_if_container_metric)
         for c_id, sample in samples.items():
diff --git a/mapreduce/tests/common.py b/mapreduce/tests/common.py
index 64466d2c55981..80b62156394b3 100644
--- a/mapreduce/tests/common.py
+++ b/mapreduce/tests/common.py
@@ -82,7 +82,7 @@ def setup_mapreduce():


 @contextmanager
 def mock_local_mapreduce_dns():
-    mapping = {x: ('127.0.0.1', None) for x in MOCKED_E2E_HOSTS}
+    mapping = dict.fromkeys(MOCKED_E2E_HOSTS, ('127.0.0.1', None))
     with mock_local(mapping):
         yield
diff --git a/mongo/datadog_checks/mongo/discovery.py b/mongo/datadog_checks/mongo/discovery.py
index a9516029c11c6..6ea06e220253e 100644
--- a/mongo/datadog_checks/mongo/discovery.py
+++ b/mongo/datadog_checks/mongo/discovery.py
@@ -17,7 +17,7 @@ def __init__(self, check):

         super(MongoDBDatabaseAutodiscovery, self).__init__(
             self._list_databases,
-            include={db: 0 for db in self._autodiscovery_config.get("include", [".*"])},
+            include=dict.fromkeys(self._autodiscovery_config.get("include", [".*"]), 0),
             exclude=self._autodiscovery_config.get("exclude"),
             interval=self._autodiscovery_config.get('refresh_interval', DEFAULT_REFRESH_INTERVAL),
         )
diff --git a/network/datadog_checks/network/network.py b/network/datadog_checks/network/network.py
index 18761deef2846..b58c6bcf23dd8 100644
--- a/network/datadog_checks/network/network.py
+++ b/network/datadog_checks/network/network.py
@@ -317,7 +317,7 @@ def get_net_proc_base_location(proc_location):
         return net_proc_base_location

     def _get_metrics(self):
-        return {val: 0 for val in self.cx_state_gauge.values()}
+        return dict.fromkeys(self.cx_state_gauge.values(), 0)

     def parse_cx_state(self, lines, tcp_states, state_col, protocol=None, ip_version=None):
         """
diff --git a/nfsstat/tests/test_nfsstat.py b/nfsstat/tests/test_nfsstat.py
index 8a0ff0fab56a2..25048382831c8 100644
--- a/nfsstat/tests/test_nfsstat.py
+++ b/nfsstat/tests/test_nfsstat.py
@@ -65,8 +65,8 @@ def test_check(self, aggregator):
         tags_unicode.extend(
             [
                 u'nfs_server:192.168.34.1',
-                u'nfs_export:/exports/nfs/datadog/thr\u00E9\u00E9',
-                u'nfs_mount:/mnt/datadog/thr\u00E9\u00E9',
+                u'nfs_export:/exports/nfs/datadog/thr\u00e9\u00e9',
+                u'nfs_mount:/mnt/datadog/thr\u00e9\u00e9',
             ]
         )
diff --git a/postgres/tests/test_statements.py b/postgres/tests/test_statements.py
index 756847edb1a3c..f5ac3ce305351 100644
--- a/postgres/tests/test_statements.py
+++ b/postgres/tests/test_statements.py
@@ -798,7 +798,7 @@ def test_failed_explain_handling(
         "FROM persons WHERE city = %s",
         # Use some multi-byte characters (the euro symbol) so we can validate that the code is correctly
         # looking at the length in bytes when testing for truncated statements
-        u'\u20AC\u20AC\u20AC\u20AC\u20AC\u20AC\u20AC\u20AC\u20AC\u20AC',
+        u'\u20ac\u20ac\u20ac\u20ac\u20ac\u20ac\u20ac\u20ac\u20ac\u20ac',
         "error:explain-query_truncated-track_activity_query_size=1024",
         [{'code': 'query_truncated', 'message': 'track_activity_query_size=1024'}],
         StatementTruncationState.truncated.value,
diff --git a/process/tests/test_process.py b/process/tests/test_process.py
index 8eed74076d967..e2ef92e9040e7 100644
--- a/process/tests/test_process.py
+++ b/process/tests/test_process.py
@@ -168,7 +168,7 @@ def mock_psutil_wrapper(method, accessors):
     if accessors is None:
         result = 0
     else:
-        result = {accessor: 0 for accessor in accessors}
+        result = dict.fromkeys(accessors, 0)

     return result
diff --git a/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py b/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py
index f52d2d9c8d62f..5b4b3c35b7640 100644
--- a/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py
+++ b/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py
@@ -490,7 +490,7 @@ def get_connections_stat(self, instance, base_url, object_type, vhosts, limit_vh
         if grab_all_data or not len(data):
             data = self._get_data(urljoin(base_url, object_type))

-        stats = {vhost: 0 for vhost in vhosts}
+        stats = dict.fromkeys(vhosts, 0)
         connection_states = defaultdict(int)
         for conn in data:
             if conn['vhost'] in vhosts:
diff --git a/teleport/datadog_checks/teleport/metrics.py b/teleport/datadog_checks/teleport/metrics.py
index eae25afcfd34e..9639844cb9cbf 100644
--- a/teleport/datadog_checks/teleport/metrics.py
+++ b/teleport/datadog_checks/teleport/metrics.py
@@ -208,10 +208,10 @@
 }

 METRIC_MAP_BY_SERVICE = {
-    **{metric: "teleport" for metric in COMMON_METRICS_MAP.keys()},
-    **{metric: "proxy" for metric in PROXY_METRICS_MAP.keys()},
-    **{metric: "auth" for metric in AUTH_METRICS_MAP.keys()},
-    **{metric: "ssh" for metric in SSH_METRICS_MAP.keys()},
-    **{metric: "kubernetes" for metric in KUBERNETES_METRICS_MAP.keys()},
-    **{metric: "database" for metric in DATABASE_METRICS_MAP.keys()},
+    **dict.fromkeys(COMMON_METRICS_MAP.keys(), "teleport"),
+    **dict.fromkeys(PROXY_METRICS_MAP.keys(), "proxy"),
+    **dict.fromkeys(AUTH_METRICS_MAP.keys(), "auth"),
+    **dict.fromkeys(SSH_METRICS_MAP.keys(), "ssh"),
+    **dict.fromkeys(KUBERNETES_METRICS_MAP.keys(), "kubernetes"),
+    **dict.fromkeys(DATABASE_METRICS_MAP.keys(), "database"),
 }
diff --git a/temporal/scripts/generate_metadata.py b/temporal/scripts/generate_metadata.py
index 5275d49c75d12..7c03b9292b518 100644
--- a/temporal/scripts/generate_metadata.py
+++ b/temporal/scripts/generate_metadata.py
@@ -41,7 +41,7 @@ def main():
     def append_metric_metadata(metric_name, metric_type='count', unit_name=None):
         qualified_metric_name = f'temporal.server.{metric_name}'
-        metric_meta = {k: '' for k in metadata_fields}
+        metric_meta = dict.fromkeys(metadata_fields, '')
         metric_meta['orientation'] = 0
         metric_meta.update(previous_metadata.get(qualified_metric_name, {}))
         metric_meta['integration'] = 'temporal'
diff --git a/torchserve/datadog_checks/torchserve/model_discovery.py b/torchserve/datadog_checks/torchserve/model_discovery.py
index d8309a71c9e08..a01812d0f3b8c 100644
--- a/torchserve/datadog_checks/torchserve/model_discovery.py
+++ b/torchserve/datadog_checks/torchserve/model_discovery.py
@@ -11,7 +11,7 @@ def __init__(self, check, limit=None, include=None, exclude=None, interval=None)
         super().__init__(
             self.get_models,
             limit=limit,
-            include={pattern: None for pattern in include} if include else None,
+            include=dict.fromkeys(include) if include else None,
             exclude=exclude,
             interval=interval,
             key=lambda n: n.get("modelName"),
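
Note on the recurring rewrite (not part of the patch): every {k: v for k in xs} -> dict.fromkeys(xs, v) change above is behavior-preserving because the shared value is immutable in each case (0, '', None, a tuple, a string, or a bound method). dict.fromkeys evaluates the value expression once and binds the same object to every key, so the two forms diverge when the value is mutable. A minimal sketch with toy keys (not taken from the patch):

    # Equivalent when the value is immutable, as everywhere in this patch:
    assert {k: 0 for k in ("a", "b")} == dict.fromkeys(("a", "b"), 0)

    # With no value argument, fromkeys defaults to None, matching {k: None for k in xs}:
    assert dict.fromkeys(("a", "b")) == {"a": None, "b": None}

    # Not equivalent when the value is mutable: fromkeys shares one object.
    shared = dict.fromkeys(("a", "b"), [])
    shared["a"].append(1)   # mutates the single list shared by both keys
    assert shared["b"] == [1]

    per_key = {k: [] for k in ("a", "b")}
    per_key["a"].append(1)  # the comprehension gave each key its own list
    assert per_key["b"] == []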