From 307350d090994de30aacc809de022796169deb7c Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Sun, 2 Feb 2025 12:49:41 +0100
Subject: [PATCH] Ruff: Add and fix PLR6104

---
 dojo/benchmark/views.py                               |  4 +---
 dojo/filters.py                                       |  5 ++---
 dojo/finding/views.py                                 |  4 ++--
 dojo/metrics/utils.py                                 |  2 +-
 dojo/models.py                                        | 10 +++++-----
 dojo/settings/settings.dist.py                        |  2 +-
 dojo/survey/views.py                                  |  2 +-
 dojo/templatetags/announcement_banner_tags.py         |  2 +-
 dojo/templatetags/get_banner.py                       |  2 +-
 dojo/tools/api_bugcrowd/api_client.py                 |  2 +-
 dojo/tools/arachni/parser.py                          |  8 ++------
 dojo/tools/burp/parser.py                             |  8 ++------
 dojo/tools/burp_graphql/parser.py                     |  2 +-
 dojo/tools/checkmarx/parser.py                        |  4 ++--
 dojo/tools/cyclonedx/json_parser.py                   |  5 +----
 dojo/tools/cyclonedx/xml_parser.py                    |  5 +----
 dojo/tools/dependency_check/parser.py                 |  5 +----
 dojo/tools/dependency_track/parser.py                 |  8 ++++----
 dojo/tools/hcl_asoc_sast/parser.py                    |  2 +-
 dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py |  2 +-
 dojo/tools/jfrog_xray_unified/parser.py               |  2 +-
 dojo/tools/jfrogxray/parser.py                        |  2 +-
 dojo/tools/mobsf/parser.py                            |  4 ++--
 dojo/tools/scout_suite/parser.py                      |  6 +++---
 dojo/tools/sonarqube/soprasteria_helper.py            |  2 +-
 dojo/tools/ssl_labs/parser.py                         |  4 ++--
 dojo/tools/trufflehog/parser.py                       |  4 ++--
 dojo/tools/trufflehog3/parser.py                      |  2 +-
 dojo/utils.py                                         |  6 +++---
 ruff.toml                                             |  2 +-
 tests/zap.py                                          |  8 ++++----
 unittests/test_finding_helper.py                      |  2 +-
 unittests/test_import_reimport.py                     |  4 ++--
 unittests/tools/test_aqua_parser.py                   |  4 ++--
 34 files changed, 58 insertions(+), 78 deletions(-)
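PLR6104 is Ruff's "non-augmented-assignment" rule (pylint refactor group):
it flags statements that rebuild a value by hand, such as `x = x + y`, and
prefers the augmented form `x += y`. A minimal sketch of the pattern (the
names here are invented for illustration and do not appear in this diff):

    count = 0
    count = count + 1   # flagged by PLR6104
    count += 1          # the augmented form the rule prefers

One nuance worth remembering when reviewing such rewrites: for mutable
objects such as lists, `x = x + y` rebinds the name to a new object, while
`x += y` mutates the existing object in place, so other names aliasing it
will observe the change.
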
diff --git a/dojo/benchmark/views.py b/dojo/benchmark/views.py
index 5c45afd0584..39fc3702a55 100644
--- a/dojo/benchmark/views.py
+++ b/dojo/benchmark/views.py
@@ -118,9 +118,7 @@ def return_score(queryset):
     for item in queryset:
         if item["pass_fail"]:
             asvs_level_1_score = item["pass_fail__count"]
-        asvs_level_1_benchmark = (
-            asvs_level_1_benchmark + item["pass_fail__count"]
-        )
+        asvs_level_1_benchmark += item["pass_fail__count"]
 
     return asvs_level_1_benchmark, asvs_level_1_score
 
diff --git a/dojo/filters.py b/dojo/filters.py
index 844d975c4e1..ff905bc42d4 100644
--- a/dojo/filters.py
+++ b/dojo/filters.py
@@ -334,8 +334,7 @@ def __init__(self, *args, **kwargs):
             # we defer applying the select2 autocomplete because there can be multiple forms on the same page
             # and form.js would then apply select2 multiple times, resulting in duplicated fields
             # the initialization now happens in filter_js_snippet.html
-            self.form.fields[field].widget.tag_options = \
-                self.form.fields[field].widget.tag_options + tagulous.models.options.TagOptions(autocomplete_settings={"width": "200px", "defer": True})
+            self.form.fields[field].widget.tag_options += tagulous.models.options.TagOptions(autocomplete_settings={"width": "200px", "defer": True})
             tagged_model, exclude = get_tags_model_from_field_name(field)
             if tagged_model:  # only if not the normal tags field
                 self.form.fields[field].label = get_tags_label_from_model(tagged_model)
@@ -1592,7 +1591,7 @@ def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
     def filter_percentage(self, queryset, name, value):
-        value = value / decimal.Decimal("100.0")
+        value /= decimal.Decimal("100.0")
         # Provide some wiggle room for filtering since the UI rounds to two places (and because floats):
         # a user may enter 0.15, but we'll return everything in [0.0015, 0.0016).
         # To do this, add to our value 1^(whatever the exponent for our least significant digit place is), but ensure
diff --git a/dojo/finding/views.py b/dojo/finding/views.py
index 6773ebb4a1e..2bd5c5c4780 100644
--- a/dojo/finding/views.py
+++ b/dojo/finding/views.py
@@ -1379,9 +1379,9 @@ def defect_finding_review(request, fid):
         # Add the closing note
         if push_to_jira and not finding_in_group:
             if defect_choice == "Close Finding":
-                new_note.entry = new_note.entry + "\nJira issue set to resolved."
+                new_note.entry += "\nJira issue set to resolved."
             else:
-                new_note.entry = new_note.entry + "\nJira issue re-opened."
+                new_note.entry += "\nJira issue re-opened."
             jira_helper.add_comment(finding, new_note, force_push=True)
         # Save the finding
         finding.save(push_to_jira=(push_to_jira and not finding_in_group))
diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py
index e1b59b6a3d4..9647f84c7f9 100644
--- a/dojo/metrics/utils.py
+++ b/dojo/metrics/utils.py
@@ -433,7 +433,7 @@ def get_charting_data(
     if period == MetricsPeriod.WEEK:
         # For weeks, start at the first day of the specified week
         start_date = datetime(start_date.year, start_date.month, start_date.day, tzinfo=tz)
-        start_date = start_date + timedelta(days=-start_date.weekday())
+        start_date += timedelta(days=-start_date.weekday())
     else:
         # For months, start on the first day of the month
         start_date = datetime(start_date.year, start_date.month, 1, tzinfo=tz)
diff --git a/dojo/models.py b/dojo/models.py
index 19da79d68bc..b122a1c65c9 100644
--- a/dojo/models.py
+++ b/dojo/models.py
@@ -857,11 +857,11 @@ def calc_health(self):
         health = 100
         if c_findings.count() > 0:
             health = 40
-            health = health - ((c_findings.count() - 1) * 5)
+            health -= ((c_findings.count() - 1) * 5)
         if h_findings.count() > 0:
             if health == 100:
                 health = 60
-            health = health - ((h_findings.count() - 1) * 2)
+            health -= ((h_findings.count() - 1) * 2)
         if health < 5:
             return 5
         return health
@@ -2835,16 +2835,16 @@ def compute_hash_code(self):
             if hashcodeField == "endpoints":
                 # For endpoints, need to compute the field
                 myEndpoints = self.get_endpoints()
-                fields_to_hash = fields_to_hash + myEndpoints
+                fields_to_hash += myEndpoints
                 deduplicationLogger.debug(hashcodeField + " : " + myEndpoints)
             elif hashcodeField == "vulnerability_ids":
                 # For vulnerability_ids, need to compute the field
                 my_vulnerability_ids = self.get_vulnerability_ids()
-                fields_to_hash = fields_to_hash + my_vulnerability_ids
+                fields_to_hash += my_vulnerability_ids
                 deduplicationLogger.debug(hashcodeField + " : " + my_vulnerability_ids)
             else:
                 # Generically use the finding attribute having the same name, converts to str in case it's integer
-                fields_to_hash = fields_to_hash + str(getattr(self, hashcodeField))
+                fields_to_hash += str(getattr(self, hashcodeField))
                 deduplicationLogger.debug(hashcodeField + " : " + str(getattr(self, hashcodeField)))
         deduplicationLogger.debug("compute_hash_code - fields_to_hash = " + fields_to_hash)
         return self.hash_fields(fields_to_hash)
diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py
index 53e5c595dd9..d6cb4007ede 100644
--- a/dojo/settings/settings.dist.py
+++ b/dojo/settings/settings.dist.py
@@ -884,7 +884,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
         # https://warehouse.python.org/project/whitenoise/
         "whitenoise.middleware.WhiteNoiseMiddleware",
     ]
-    MIDDLEWARE = MIDDLEWARE + WHITE_NOISE
+    MIDDLEWARE += WHITE_NOISE
 
 EMAIL_CONFIG = env.email_url(
     "DD_EMAIL_URL", default="smtp://user@:password@localhost:25")
diff --git a/dojo/survey/views.py b/dojo/survey/views.py
index 544d90fddca..a3262b638f0 100644
--- a/dojo/survey/views.py
+++ b/dojo/survey/views.py
@@ -789,7 +789,7 @@ def answer_empty_survey(request, esid):
             survey.responder = request.user if not request.user.is_anonymous else None
             survey.answered_on = date.today()
             survey.save()
-            general_survey.num_responses = general_survey.num_responses + 1
+            general_survey.num_responses += 1
             general_survey.save()
             if request.user.is_anonymous:
                 message = "Your responses have been recorded."
diff --git a/dojo/templatetags/announcement_banner_tags.py b/dojo/templatetags/announcement_banner_tags.py
index 8600bb3b791..38e473350fc 100644
--- a/dojo/templatetags/announcement_banner_tags.py
+++ b/dojo/templatetags/announcement_banner_tags.py
@@ -9,7 +9,7 @@
 @register.filter
 def bleach_announcement_message(message):
     allowed_attributes = bleach.ALLOWED_ATTRIBUTES
-    allowed_attributes["a"] = allowed_attributes["a"] + ["style", "target"]
+    allowed_attributes["a"] += ["style", "target"]
     return mark_safe(bleach.clean(
         message,
         attributes=allowed_attributes,
diff --git a/dojo/templatetags/get_banner.py b/dojo/templatetags/get_banner.py
index ea50061980b..492cc1fddcc 100644
--- a/dojo/templatetags/get_banner.py
+++ b/dojo/templatetags/get_banner.py
@@ -17,7 +17,7 @@ def get_banner_conf(attribute):
             if attribute == "banner_message":
                 # only admin can edit login banner, so we allow html, but still bleach it
                 allowed_attributes = bleach.ALLOWED_ATTRIBUTES
-                allowed_attributes["a"] = allowed_attributes["a"] + ["style", "target"]
+                allowed_attributes["a"] += ["style", "target"]
                 return mark_safe(bleach.clean(
                     value,
                     attributes=allowed_attributes,
diff --git a/dojo/tools/api_bugcrowd/api_client.py b/dojo/tools/api_bugcrowd/api_client.py
index c1672ddfffb..c49de7874d5 100644
--- a/dojo/tools/api_bugcrowd/api_client.py
+++ b/dojo/tools/api_bugcrowd/api_client.py
@@ -136,7 +136,7 @@ def test_product_connection(self, api_scan_configuration):
             api_scan_configuration.service_key_2,
         )
         for page in submission_gen:
-            submissions = submissions + page
+            submissions += page
         submission_number = len(submissions)
         return (
             f'You have access to "{submission_number}" submissions (no duplicates)'
diff --git a/dojo/tools/arachni/parser.py b/dojo/tools/arachni/parser.py
index 30947c33509..4acb6144a72 100644
--- a/dojo/tools/arachni/parser.py
+++ b/dojo/tools/arachni/parser.py
@@ -40,12 +40,8 @@ def get_items(self, tree, test):
             item = self.get_item(node, report_date)
             dupe_key = item.severity + item.title
             if dupe_key in items:
-                items[dupe_key].unsaved_endpoints = (
-                    items[dupe_key].unsaved_endpoints + item.unsaved_endpoints
-                )
-                items[dupe_key].unsaved_req_resp = (
-                    items[dupe_key].unsaved_req_resp + item.unsaved_req_resp
-                )
+                items[dupe_key].unsaved_endpoints += item.unsaved_endpoints
+                items[dupe_key].unsaved_req_resp += item.unsaved_req_resp
                 items[dupe_key].nb_occurences += 1
             else:
                 items[dupe_key] = item
diff --git a/dojo/tools/burp/parser.py b/dojo/tools/burp/parser.py
index 976872f8c98..58cdd9d731c 100644
--- a/dojo/tools/burp/parser.py
+++ b/dojo/tools/burp/parser.py
@@ -41,12 +41,8 @@ def get_items(self, tree, test):
             item = get_item(node, test)
             dupe_key = item.vuln_id_from_tool
             if dupe_key in items:
-                items[dupe_key].unsaved_endpoints = (
-                    items[dupe_key].unsaved_endpoints + item.unsaved_endpoints
-                )
-                items[dupe_key].unsaved_req_resp = (
-                    items[dupe_key].unsaved_req_resp + item.unsaved_req_resp
-                )
+                items[dupe_key].unsaved_endpoints += item.unsaved_endpoints
+                items[dupe_key].unsaved_req_resp += item.unsaved_req_resp
 
                 # Description details of the finding are added
                 items[dupe_key].description = (
diff --git a/dojo/tools/burp_graphql/parser.py b/dojo/tools/burp_graphql/parser.py
index 11df852dc54..bfb6ba7cd48 100644
--- a/dojo/tools/burp_graphql/parser.py
+++ b/dojo/tools/burp_graphql/parser.py
@@ -88,7 +88,7 @@ def combine_findings(self, finding, issue):
             finding["Description"] += description + "\n\n"
 
         if issue.get("evidence"):
-            finding["Evidence"] = finding["Evidence"] + self.parse_evidence(
+            finding["Evidence"] += self.parse_evidence(
                 issue.get("evidence"),
             )
 
diff --git a/dojo/tools/checkmarx/parser.py b/dojo/tools/checkmarx/parser.py
index 7cca60b57e8..62929948b30 100644
--- a/dojo/tools/checkmarx/parser.py
+++ b/dojo/tools/checkmarx/parser.py
@@ -74,7 +74,7 @@ def _get_findings_xml(self, filename, test):
             if language not in language_list:
                 language_list[language] = 1
             else:
-                language_list[language] = language_list[language] + 1
+                language_list[language] += 1
 
             if group is not None:
                 findingdetail = f"{findingdetail}**Group:** {group}\n"
@@ -177,7 +177,7 @@ def _process_result_file_name_aggregated(
             # We have already created a finding for this aggregate: updates the
             # description and the nb_occurences
             find = dupes[aggregateKeys]
-            find.nb_occurences = find.nb_occurences + 1
+            find.nb_occurences += 1
             if find.nb_occurences == 2:
                 find.description = f"### 1. {find.title}\n{find.description}"
             find.description = f"{find.description}\n\n-----\n### {find.nb_occurences}. {title}\n{findingdetail}\n{description}"
diff --git a/dojo/tools/cyclonedx/json_parser.py b/dojo/tools/cyclonedx/json_parser.py
index 64bd02b5dd7..6521a9e0a34 100644
--- a/dojo/tools/cyclonedx/json_parser.py
+++ b/dojo/tools/cyclonedx/json_parser.py
@@ -127,10 +127,7 @@ def _get_findings_json(self, file, test):
                 if not finding.active:
                     detail = analysis.get("detail")
                     if detail:
-                        finding.mitigation = (
-                            finding.mitigation
-                            + f"\n**This vulnerability is mitigated and/or suppressed:** {detail}\n"
-                        )
+                        finding.mitigation += f"\n**This vulnerability is mitigated and/or suppressed:** {detail}\n"
 
             findings.append(finding)
         return findings
diff --git a/dojo/tools/cyclonedx/xml_parser.py b/dojo/tools/cyclonedx/xml_parser.py
index 55aa4995356..0945c3c92c0 100644
--- a/dojo/tools/cyclonedx/xml_parser.py
+++ b/dojo/tools/cyclonedx/xml_parser.py
@@ -294,9 +294,6 @@ def _manage_vulnerability_xml(
                 "b:detail", namespaces=ns,
             )
             if detail:
-                finding.mitigation = (
-                    finding.mitigation
-                    + f"\n**This vulnerability is mitigated and/or suppressed:** {detail}\n"
-                )
+                finding.mitigation += f"\n**This vulnerability is mitigated and/or suppressed:** {detail}\n"
         findings.append(finding)
     return findings
diff --git a/dojo/tools/dependency_check/parser.py b/dojo/tools/dependency_check/parser.py
index 984f436d00b..fb617e70af1 100644
--- a/dojo/tools/dependency_check/parser.py
+++ b/dojo/tools/dependency_check/parser.py
@@ -298,10 +298,7 @@ def get_finding_from_vulnerability(
                 notes = "Document on why we are suppressing this vulnerability is missing!"
                 tags.append("no_suppression_document")
             mitigation = f"**This vulnerability is mitigated and/or suppressed:** {notes}\n"
-            mitigation = (
-                mitigation
-                + f"Update {component_name}:{component_version} to at least the version recommended in the description"
-            )
+            mitigation += f"Update {component_name}:{component_version} to at least the version recommended in the description"
             mitigated = datetime.datetime.now(datetime.UTC)
             is_Mitigated = True
             active = False
diff --git a/dojo/tools/dependency_track/parser.py b/dojo/tools/dependency_track/parser.py
index 9890b58501c..097db3883e2 100644
--- a/dojo/tools/dependency_track/parser.py
+++ b/dojo/tools/dependency_track/parser.py
@@ -180,7 +180,7 @@ def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_fin
         # Append purl info if it is present
         if "purl" in dependency_track_finding["component"] and dependency_track_finding["component"]["purl"] is not None:
             component_purl = dependency_track_finding["component"]["purl"]
-            vulnerability_description = vulnerability_description + f"\nThe purl of the affected component is: {component_purl}."
+            vulnerability_description += f"\nThe purl of the affected component is: {component_purl}."
             # there is no file_path in the report, but defect dojo needs it otherwise it skips deduplication:
             # see https://github.com/DefectDojo/django-DefectDojo/issues/3647
             # might be no longer needed in the future, and is not needed if people use the default
@@ -191,11 +191,11 @@ def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_fin
 
         # Append other info about vulnerability description info if it is present
         if "title" in dependency_track_finding["vulnerability"] and dependency_track_finding["vulnerability"]["title"] is not None:
-            vulnerability_description = vulnerability_description + "\nVulnerability Title: {title}".format(title=dependency_track_finding["vulnerability"]["title"])
+            vulnerability_description += "\nVulnerability Title: {title}".format(title=dependency_track_finding["vulnerability"]["title"])
         if "subtitle" in dependency_track_finding["vulnerability"] and dependency_track_finding["vulnerability"]["subtitle"] is not None:
-            vulnerability_description = vulnerability_description + "\nVulnerability Subtitle: {subtitle}".format(subtitle=dependency_track_finding["vulnerability"]["subtitle"])
+            vulnerability_description += "\nVulnerability Subtitle: {subtitle}".format(subtitle=dependency_track_finding["vulnerability"]["subtitle"])
         if "description" in dependency_track_finding["vulnerability"] and dependency_track_finding["vulnerability"]["description"] is not None:
-            vulnerability_description = vulnerability_description + "\nVulnerability Description: {description}".format(description=dependency_track_finding["vulnerability"]["description"])
+            vulnerability_description += "\nVulnerability Description: {description}".format(description=dependency_track_finding["vulnerability"]["description"])
 
         if "uuid" in dependency_track_finding["vulnerability"] and dependency_track_finding["vulnerability"]["uuid"] is not None:
             vuln_id_from_tool = dependency_track_finding["vulnerability"]["uuid"]
diff --git a/dojo/tools/hcl_asoc_sast/parser.py b/dojo/tools/hcl_asoc_sast/parser.py
index 2ce21d96619..fe1babb2eb3 100644
--- a/dojo/tools/hcl_asoc_sast/parser.py
+++ b/dojo/tools/hcl_asoc_sast/parser.py
@@ -122,7 +122,7 @@ def get_findings(self, file, test):
                     for codeitem in codeblock:
                         if codeitem.tag == "item" and codeitem.attrib["type"] == "string":
                             if codeitem.text is None:
-                                recommendations = recommendations + "\n"
+                                recommendations += "\n"
                             else:
                                 recommendations = recommendations + self.xmltreehelper(codeitem) + "\n"
 
diff --git a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
index 107230d47e2..b6d1c11a661 100644
--- a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
+++ b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
@@ -105,7 +105,7 @@ def process_component(component):
     fixed_versions = component.get("fixed_versions")
     if fixed_versions:
         mitigation = "**Versions containing a fix:**\n\n- "
-        mitigation = mitigation + "\n- ".join(fixed_versions)
+        mitigation += "\n- ".join(fixed_versions)
     if "impact_paths" in component:
         refs = []
         impact_paths_l1 = component["impact_paths"]
diff --git a/dojo/tools/jfrog_xray_unified/parser.py b/dojo/tools/jfrog_xray_unified/parser.py
index e8a41c5d89c..3f394cce345 100644
--- a/dojo/tools/jfrog_xray_unified/parser.py
+++ b/dojo/tools/jfrog_xray_unified/parser.py
@@ -84,7 +84,7 @@ def get_item(vulnerability, test):
         and len(vulnerability["fixed_versions"]) > 0
     ):
         mitigation = "Versions containing a fix:\n"
-        mitigation = mitigation + "\n".join(vulnerability["fixed_versions"])
+        mitigation += "\n".join(vulnerability["fixed_versions"])
 
     if (
         "external_advisory_source" in vulnerability
diff --git a/dojo/tools/jfrogxray/parser.py b/dojo/tools/jfrogxray/parser.py
index 54962478d94..0f23576903b 100644
--- a/dojo/tools/jfrogxray/parser.py
+++ b/dojo/tools/jfrogxray/parser.py
@@ -94,7 +94,7 @@ def get_item(vulnerability, test):
 
     if "fixed_versions" in vulnerability["component_versions"]:
         mitigation = "**Versions containing a fix:**\n"
-        mitigation = mitigation + "\n".join(
+        mitigation += "\n".join(
             vulnerability["component_versions"]["fixed_versions"],
         )
 
diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py
index adcabbe25eb..b723be38af8 100644
--- a/dojo/tools/mobsf/parser.py
+++ b/dojo/tools/mobsf/parser.py
@@ -303,7 +303,7 @@ def get_findings(self, filename, test):
 
                 file_path = None
                 if "path" in finding:
-                    description = description + "\n\n**Files:**\n"
+                    description += "\n\n**Files:**\n"
                     for path in finding["path"]:
                         if file_path is None:
                             file_path = path
@@ -335,7 +335,7 @@ def get_findings(self, filename, test):
                 file_path = None
                 if mobsf_finding["category"]:
                     description += "**Category:** " + mobsf_finding["category"] + "\n\n"
-                description = description + html2text(mobsf_finding["description"])
+                description += html2text(mobsf_finding["description"])
                 finding = Finding(
                     title=title,
                     cwe=919,  # Weaknesses in Mobile Applications
diff --git a/dojo/tools/scout_suite/parser.py b/dojo/tools/scout_suite/parser.py
index 41eec27de56..3c5a976c0e1 100644
--- a/dojo/tools/scout_suite/parser.py
+++ b/dojo/tools/scout_suite/parser.py
@@ -120,10 +120,10 @@ def __get_items(self, data):
                         or key[i - 1] == "PolicyDocument"
                     ):
                         break
-                    i = i + 1
+                    i += 1
 
                 self.recursive_print(lookup)
-                description_text = description_text + self.item_data
+                description_text += self.item_data
                 self.item_data = ""
 
                 find = Finding(
@@ -166,7 +166,7 @@ def tabs(n):
                 self.recursive_print(litem, depth + 2)
         else:
             if self.pdepth != depth:
-                self.item_data = self.item_data + "\n"
+                self.item_data += "\n"
             if key:
                 self.item_data = (
                     self.item_data
diff --git a/dojo/tools/sonarqube/soprasteria_helper.py b/dojo/tools/sonarqube/soprasteria_helper.py
index 5e7fe7dff0b..5de3029464c 100644
--- a/dojo/tools/sonarqube/soprasteria_helper.py
+++ b/dojo/tools/sonarqube/soprasteria_helper.py
@@ -97,7 +97,7 @@ def process_result_file_name_aggregated(
             find = dupes[aggregateKeys]
             find.description = f"{find.description}\n{descriptionOneOccurence}"
             find.mitigation = f"{find.mitigation}\n______\n{vuln_mitigation}"
-            find.nb_occurences = find.nb_occurences + 1
+            find.nb_occurences += 1
 
     # Process one vuln from the report for "SonarQube Scan detailed"
    # Create the finding and add it into the dupes list
diff --git a/dojo/tools/ssl_labs/parser.py b/dojo/tools/ssl_labs/parser.py
index 6a1ff7a7d9e..a9850916e48 100644
--- a/dojo/tools/ssl_labs/parser.py
+++ b/dojo/tools/ssl_labs/parser.py
@@ -108,11 +108,11 @@ def get_findings(self, filename, test):
                 try:
                     if "list" in endpoints["details"]["suites"]:
                         for suites in endpoints["details"]["suites"]["list"]:
-                            suite_info = suite_info + self.suite_data(suites)
+                            suite_info += self.suite_data(suites)
                     elif "suites" in endpoints["details"]:
                         for item in endpoints["details"]["suites"]:
                             for suites in item["list"]:
-                                suite_info = suite_info + self.suite_data(
+                                suite_info += self.suite_data(
                                     suites,
                                 )
                 except Exception:
diff --git a/dojo/tools/trufflehog/parser.py b/dojo/tools/trufflehog/parser.py
index c51f3f8163e..a7f8334625a 100644
--- a/dojo/tools/trufflehog/parser.py
+++ b/dojo/tools/trufflehog/parser.py
@@ -72,7 +72,7 @@ def get_findings_v2(self, data, test):
 
             if dupe_key in dupes:
                 finding = dupes[dupe_key]
-                finding.description = finding.description + description
+                finding.description += description
                 finding.nb_occurences += 1
                 dupes[dupe_key] = finding
             else:
@@ -172,7 +172,7 @@ def get_findings_v3(self, data, test):
 
             if dupe_key in dupes:
                 finding = dupes[dupe_key]
-                finding.description = finding.description + description
+                finding.description += description
                 finding.nb_occurences += 1
                 dupes[dupe_key] = finding
             else:
diff --git a/dojo/tools/trufflehog3/parser.py b/dojo/tools/trufflehog3/parser.py
index 72331d7ba4c..c146b742a42 100644
--- a/dojo/tools/trufflehog3/parser.py
+++ b/dojo/tools/trufflehog3/parser.py
@@ -72,7 +72,7 @@ def get_finding_legacy(self, json_data, test, dupes):
 
         if dupe_key in dupes:
             finding = dupes[dupe_key]
-            finding.description = finding.description + description
+            finding.description += description
             finding.nb_occurences += 1
             dupes[dupe_key] = finding
         else:
diff --git a/dojo/utils.py b/dojo/utils.py
index 6d62ed5a6b3..8274e6ae984 100644
--- a/dojo/utils.py
+++ b/dojo/utils.py
@@ -1631,11 +1631,11 @@ def get_work_days(start: date, end: date):
     """
     # if the start date is on a weekend, forward the date to next Monday
     if start.weekday() > WEEKDAY_FRIDAY:
-        start = start + timedelta(days=7 - start.weekday())
+        start += timedelta(days=7 - start.weekday())
 
     # if the end date is on a weekend, rewind the date to the previous Friday
     if end.weekday() > WEEKDAY_FRIDAY:
-        end = end - timedelta(days=end.weekday() - WEEKDAY_FRIDAY)
+        end -= timedelta(days=end.weekday() - WEEKDAY_FRIDAY)
 
     if start > end:
         return 0
@@ -1646,7 +1646,7 @@ def get_work_days(start: date, end: date):
     remainder = end.weekday() - start.weekday() + 1
 
     if remainder != 0 and end.weekday() < start.weekday():
-        remainder = 5 + remainder
+        remainder += 5
 
     return weeks * 5 + remainder
 
diff --git a/ruff.toml b/ruff.toml
index fc3d86b8091..e221fba9174 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -75,7 +75,7 @@ select = [
     "PGH",
     "PLC01", "PLC0205", "PLC0208", "PLC0414", "PLC18", "PLC24", "PLC3",
     "PLE",
-    "PLR01", "PLR0203", "PLR0206", "PLR04", "PLR0915", "PLR1716", "PLR172", "PLR1733", "PLR1736", "PLR6201",
+    "PLR01", "PLR0203", "PLR0206", "PLR04", "PLR0915", "PLR1716", "PLR172", "PLR1733", "PLR1736", "PLR6104", "PLR6201",
     "PLW01", "PLW02", "PLW04", "PLW07", "PLW1", "PLW2", "PLW3",
     "TRY003", "TRY004", "TRY2", "TRY300", "TRY401",
     "FLY",
diff --git a/tests/zap.py b/tests/zap.py
index 8086899332f..6aea4082576 100755
--- a/tests/zap.py
+++ b/tests/zap.py
@@ -95,13 +95,13 @@ class Main:
 
             for details in sort_by_url[url]:
                 if details["risk"] == "Informational":
-                    info = info + 1
+                    info += 1
                 if details["risk"] == "Low":
-                    low = low + 1
+                    low += 1
                 if details["risk"] == "Medium":
-                    medium = medium + 1
+                    medium += 1
                 if details["risk"] == "High":
-                    high = high + 1
+                    high += 1
 
         summary.add_row(["Informational", info])
         summary.add_row(["Low", low])
diff --git a/unittests/test_finding_helper.py b/unittests/test_finding_helper.py
index 8d3432864d9..25cd123ef16 100644
--- a/unittests/test_finding_helper.py
+++ b/unittests/test_finding_helper.py
@@ -53,7 +53,7 @@ def test_no_status_change(self, mock_tz):
 
             status_fields = self.get_status_fields(finding)
 
-            finding.title = finding.title + "!!!"
+            finding.title += "!!!"
             finding.save()
 
             self.assertEqual(
diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py
index 78792ab2991..9758271409e 100644
--- a/unittests/test_import_reimport.py
+++ b/unittests/test_import_reimport.py
@@ -1191,12 +1191,12 @@ def test_import_6_reimport_6_gitlab_dep_scan_component_name_and_version(self):
                 self.assertEqual("CVE-2020-29652: Nil Pointer Dereference", finding["title"])
                 self.assertEqual("CVE-2020-29652", finding["vulnerability_ids"][0]["vulnerability_id"])
                 self.assertEqual("golang.org/x/crypto", finding["component_name"])
-                count = count + 1
+                count += 1
             elif finding["component_version"] == "v0.3.0" or finding["component_version"] == "v0.3.2":
                 self.assertEqual("CVE-2020-14040: Loop With Unreachable Exit Condition (Infinite Loop)", finding["title"])
                 self.assertEqual("CVE-2020-14040", finding["vulnerability_ids"][0]["vulnerability_id"])
                 self.assertEqual("golang.org/x/text", finding["component_name"])
-                count = count + 1
+                count += 1
 
         self.assertEqual(5, count)
 
diff --git a/unittests/tools/test_aqua_parser.py b/unittests/tools/test_aqua_parser.py
index 380c18a2ed1..7b3452f01b9 100644
--- a/unittests/tools/test_aqua_parser.py
+++ b/unittests/tools/test_aqua_parser.py
@@ -61,7 +61,7 @@ def test_aqua_parser_cvssv3_has_no_finding(self):
         nb_cvssv3 = 0
         for finding in findings:
             if finding.cvssv3 is not None:
-                nb_cvssv3 = nb_cvssv3 + 1
+                nb_cvssv3 += 1
 
         self.assertEqual(0, nb_cvssv3)
 
@@ -72,7 +72,7 @@ def test_aqua_parser_cvssv3_has_many_findings(self):
         nb_cvssv3 = 0
         for finding in findings:
             if finding.cvssv3 is not None:
-                nb_cvssv3 = nb_cvssv3 + 1
+                nb_cvssv3 += 1
 
         self.assertEqual(16, nb_cvssv3)