diff --git a/README.md b/README.md index ebf3d51c4..b2c6c2d03 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # VMware Carbon Black Cloud Python SDK -**Latest Version:** 1.5.0 +**Latest Version:** 1.5.1
-**Release Date:** October 24, 2023
+**Release Date:** January 30, 2024

[![Coverage Status](https://coveralls.io/repos/github/carbonblack/carbon-black-cloud-sdk-python/badge.svg?t=Id6Baf)](https://coveralls.io/github/carbonblack/carbon-black-cloud-sdk-python)
[![Codeship Status for carbonblack/carbon-black-cloud-sdk-python](https://app.codeship.com/projects/9e55a370-a772-0138-aae4-129773225755/status?branch=develop)](https://app.codeship.com/projects/402767)
diff --git a/VERSION b/VERSION
index bc80560fa..26ca59460 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.5.0
+1.5.1
diff --git a/codeship-services.yml b/codeship-services.yml
index d80187932..6799232f0 100644
--- a/codeship-services.yml
+++ b/codeship-services.yml
@@ -1,3 +1,7 @@
+testingpython37:
+  build:
+    dockerfile: ./docker/python3.7/Dockerfile
+
testingpython38:
  build:
    dockerfile: ./docker/python3.8/Dockerfile
diff --git a/codeship-steps.yml b/codeship-steps.yml
index 32a9d967e..1f3220bcf 100644
--- a/codeship-steps.yml
+++ b/codeship-steps.yml
@@ -5,6 +5,9 @@
- name: Tests
  type: parallel
  steps:
+  - name: testing python 3.7
+    service: testingpython37
+    command: pytest
  - name: testing python 3.8
    service: testingpython38
    command: bin/tests_n_reports.sh
diff --git a/docker/python3.7/Dockerfile b/docker/python3.7/Dockerfile
new file mode 100644
index 000000000..eec8be4c8
--- /dev/null
+++ b/docker/python3.7/Dockerfile
@@ -0,0 +1,7 @@
+FROM python:3.7
+MAINTAINER cb-developer-network@vmware.com
+
+COPY . /app
+WORKDIR /app
+
+RUN pip3 install -r requirements.txt
\ No newline at end of file
diff --git a/docs/alerts.rst b/docs/alerts.rst
index 18d302e9c..704236ec8 100644
--- a/docs/alerts.rst
+++ b/docs/alerts.rst
@@ -15,6 +15,7 @@ Resources
* `Alert Search Fields `_ on Developer Network
* Example script in `GitHub `_
* If you are updating from SDK version 1.4.3 or earlier, see the `alerts-migration`_ guide.
+* If you are updating from Notifications, see the `notification-migration`_ guide.

.. note::
    In Alerts v7, and therefore SDK 1.5.0 onwards, Observed Alerts are not included; they are an Observation. The field ``category``
@@ -106,13 +107,13 @@ For example, the following snippet returns all types:

.. code-block:: python

-    >>> alerts = cb.select(Alert).set_types([])
+    >>> alerts = api.select(Alert).set_types([])

It is equivalent to:

.. code-block:: python

-    >>> alerts = cb.select(Alert)
+    >>> alerts = api.select(Alert)

.. tip::
    More information about the ``solrq`` can be found in
@@ -152,6 +153,51 @@ You can also read from a csv file by using values that match the profile names i

    >>> for alert in alerts:
    ...     print(alert.id, alert.device_os, alert.device_name, alert.category)

+Grouping Alerts
+---------------
+
+The examples below illustrate how to create and manipulate grouped alert objects. A Grouped Alert is a collection of alerts that share a common threat id. Grouping alerts by threat id provides greater context and insight into how pervasive a threat is.
+
+This first example retrieves all groupings of watchlist alerts from the past 10 days that have a minimum severity level of 3. If this looks familiar from basic alert retrieval, the only difference of note at this stage is that we select a GroupedAlert instead of an Alert.
+
+    >>> from cbc_sdk import CBCloudAPI
+    >>> from cbc_sdk.platform import GroupedAlert
+    >>> api = CBCloudAPI(profile="sample")
+    >>> grouped_alert_search_query = api.select(GroupedAlert)
+    >>> grouped_alert_search_query = grouped_alert_search_query.set_time_range(range="-10d").add_criteria("type", "WATCHLIST").set_minimum_severity(3)
+    >>> # trigger the search to execute:
+    >>> grouped_alert = grouped_alert_search_query.first()
+    >>> print("Number of groups: {}, Total alerts in all groups {}".format(grouped_alert_search_query._total_results, grouped_alert_search_query._group_by_total_count))
+    Number of groups: 19, Total alerts in all groups 2454
+
+As with Alerts, first() can be used on the query to retrieve the first grouping of alerts and study the metadata for a given threat id.
+
+    >>> first_alert_grouping = grouped_alert_search_query.first()
+    >>> print(first_alert_grouping.count, first_alert_grouping.highest_severity, first_alert_grouping.device_count, first_alert_grouping.workflow_states)
+    534 7 3 ("OPEN": 534)
+    >>> most_recent_alert = first_alert_grouping.most_recent_alert_
+    >>> print(most_recent_alert.threat_id)
+
+It may be necessary to retrieve all of the alerts from a threat id grouping for further inspection. The associated alert search query can be retrieved directly from a given grouped alert:
+
+    >>> alert_search_query = first_alert_grouping.get_alert_search_query()
+    >>> alerts = alert_search_query.all()
+
+It is also possible to create grouped facets from the grouped alert search query:
+
+    >>> grouped_alert_facets = grouped_alert_search_query.facets(["type", "THREAT_ID"], 0, True)
+
+Suppose instead of grouped alerts, you had been working with alerts and wanted to cross over to grouped alerts. Instead of building a new grouped alert query from scratch, you can transform an alert search query into a grouped alert search query or vice versa!
+
+    >>> from cbc_sdk import CBCloudAPI
+    >>> from cbc_sdk.platform import Alert, GroupedAlert
+    >>> api = CBCloudAPI(profile="sample")
+    >>> alert_search_query = api.select(Alert)
+    >>> alert_search_query = alert_search_query.set_time_range(range="-10d").add_criteria("type", "WATCHLIST").set_minimum_severity(3)
+    >>> group_alert_search_query = alert_search_query.set_group_by("threat_id")
+    >>> alert_search_query = group_alert_search_query.get_alert_search_query()
+
+.. note::
+    When transforming from one query type to another, the sort order parameter is not preserved. If it is necessary, it will have to be added to the query's criteria manually.

Retrieving Observations to Provide Context About an Alert
---------------------------------------------------------
@@ -275,7 +321,8 @@ The workflow leverages the alert search structure to specify the alerts to close
* Two common uses are to update one alert, or to update all alerts with a specific threat id.
* Any search request can be used as the criteria to select alerts to update the alert status.

-    .. code-block:: python
+.. code-block:: python
+
    >>> # This query will select only the alert with the specified id
    >>> ALERT_ID = "id of the alert that you want to close"
    >>> alert_query = api.select(Alert).add_criteria("id", [ALERT_ID])
@@ -287,71 +334,49 @@ The workflow leverages the alert search structure to specify the alerts to close
* The status can be ``OPEN``, ``IN PROGRESS`` or ``CLOSED`` (previously ``DISMISSED``).
* You may include a Closure Reason.

-    .. code-block:: python
+.. code-block:: python
+
    >>> # by calling update on the alert_query, the a request to change the status
    >>> # for all alerts matching that criteria will be submitted
    >>> job = alert_query.update("CLOSED", "RESOLVED", "NONE", "Setting to closed for SDK demo")

3. The immediate response confirms that the job was successfully submitted.

-    .. code-block:: python
-        >>> print("job.id = {}".format(job.id))
-        job.id = 1234567
+.. code-block:: python
+
+    >>> print("job.id = {}".format(job.id))
+    job.id = 1234567

4. Use the :py:mod:`Job() cbc_sdk.platform.jobs.Job` class to determine when the update is complete.

Use the Job object to wait until the Job has completed. The python script will wait while the SDK polls to
determine when the job is complete.

-    .. code-block:: python
+.. code-block:: python
+
    >>> completed_job = job.await_completion().result()

5. Refresh the Alert Search to get the updated alert data into the SDK.

-    .. code-block:: python
+.. code-block:: python
+
    >>> alert.refresh()
    >>> print("Status = {}, Expecting CLOSED".format(alert.workflow["status"]))

6. You can dismiss future Alerts that have the same threat id.

-Use the sequence of calls to update future alerts that have the same threat id. This sequence is usually used in conjunction with
- with the alert closure; that is, you can use the dismiss future alerts call to close future occurrences and call an
- alert closure to close current open alerts that have the threat id.
+    Use the sequence of calls to update future alerts that have the same threat id. This sequence is usually used in
+    conjunction with the alert closure; that is, you can use the dismiss future alerts call to close future
+    occurrences and call an alert closure to close current open alerts that have the threat id.
+
+.. code-block:: python

-    .. code-block:: python
    >>> alert_threat_query = api.select(Alert).add_criteria("threat_id","CFED0B211ED09F8EC1C83D4F3FBF1709")
    >>> alert.dismiss_threat("threat remediation done", "testing dismiss_threat in the SDK")
    >>> # To undo the dismissal, call update
    >>> alert.update_threat("threat remediation un-done", "testing update_threat in the SDK")

-Migrating from Notifications to Alerts
---------------------------------------
-
-.. note::
-    The Notifications API is deprecated, and deactivation is planned for 31 October 2024.
-
-    For information about migrating from the API and alternative solutions, see
-    `IntegrationService notification v3 API Migration Guide `_
-
-Notifications work on a subscription-based principle and they require a SIEM authentication key.
-By using that key, you are subscribing to a certain criteria of alerts. As this is deprecated, new alert types
-cannot be retrieved from the notifications API.
-
-See `the official notes `_ in the Carbon Black API website.
-
-.. image:: _static/cbc_platform_notification_edit.png
-    :alt: Editing a notification in the CBC Platform
-    :align: center
-
-You can replicate the settings shown in the screenshot by running the following search on Alerts:
-
-.. code-block:: python
-    >>> from cbc_sdk import CBCloudAPI
-    >>> from cbc_sdk.platform import Alert
-    >>> alerts = api.select(Alert).set_minimum_severity(7).\
-    >>> add_criteria("type", ["CB_ANALYTICS", "DEVICE_CONTROL"]).\
-    >>> add_criteria("device_policy", "Standard")

High Volume and Streaming Solution for Alerts
---------------------------------------------
diff --git a/docs/asset-groups.rst b/docs/asset-groups.rst
new file mode 100644
index 000000000..ede8c8780
--- /dev/null
+++ b/docs/asset-groups.rst
@@ -0,0 +1,215 @@
+Asset Groups
+============
+
+Asset Groups provide a way to organize and manage your fleet of Endpoints, VM Workloads, and VDIs.
+Create groups of assets and apply policies to the groups so the protections of all similar assets are synchronized.
+The ability to add one asset to multiple groups, and rank policies for precedence in application, gives added
+flexibility and fine tuning for complex organizations.
+
+You can locate the full list of operations and attributes in the
+:py:mod:`AssetGroup() ` class.
+
+Resources
+---------
+* `API Documentation `_ on Developer Network
+* Example script in `GitHub `_
+
+Retrieve Asset Groups
+---------------------
+
+There are two options for getting a list of asset groups. The function ``get_all_groups()`` does exactly that: it returns all
+Asset Groups in your organization.
+
+    >>> from cbc_sdk import CBCloudAPI
+    >>> from cbc_sdk.platform import AssetGroup
+    >>> api = CBCloudAPI(profile='sample')
+    >>> all_asset_groups = AssetGroup.get_all_groups(api)
+    >>> print("There are {} asset groups. First group: {}".format(len(all_asset_groups), all_asset_groups[0]))
+    There are 1 asset groups. First group: AssetGroup object, bound to https://defense.conferdeploy.net.
+    Partially initialized. Use .refresh() to load all attributes
+    -------------------------------------------------------------------------------
+    create_time: 2024-01-24T04:38:26.930Z
+    description: Windows No Policy
+    discovered: False
+    id: 34fc5890-caf0-400a-98ba-a81763960f6e
+    member_count: 1030
+    member_type: DEVICE
+    name: Windows No Policy
+    org_key: 7desj9gn
+    query: os.equals: "WINDOWS"
+    status: OK
+    update_time: 2024-01-24T04:38:27.972Z
+
+Asset groups can also be searched using ``name``, ``policy_id`` or ``group_id`` in the criteria element; a
+``policy_id`` example is sketched after the output below.
+
+The example shows creating an ``AssetGroupQuery`` class, then adding criteria to limit the results and specifying the
+field to sort by. The query is not executed until it is accessed, in this case by iterating over the results.
+
+Summary information for each asset group is printed, and then the devices in that asset group are listed.
+
+    >>> search_asset_group_query = api.select(AssetGroup)
+    >>> search_asset_group_query.add_criteria("name", "Second demo group")
+    >>> search_asset_group_query.sort_by("name", "ASC")
+    >>> for ag in search_asset_group_query:
+    >>>     print("\nAsset group name = {}. It has {} members".format(ag.name, ag.member_count))
+    >>>     print("Policy assigned to the Asset Group is Name: {}, Id: {}".format(ag.policy_name, ag.policy_id))
+    >>>     for d in ag.list_members():
+    >>>         print("Device Name: {}, Id: {}".format(d.name, d.id))
+    Asset group name = Second demo group. It has 3 members
+    Policy assigned to the Asset Group is Name: DemoPolicy, Id: 123456
+    Device Name: DemoDevice, Id: 2468642
+    Device Name: SDKDemo, Id: 1357975
+    Device Name: AnotherDemoMachine, Id: 19283746
+    ...truncated ...
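+
+The other criteria fields work the same way. For example, the following sketch (which assumes a policy with id
+12345 exists in your organization) finds the asset groups that currently have that policy assigned:
+
+    >>> policy_asset_group_query = api.select(AssetGroup).add_criteria("policy_id", [12345])
+    >>> for ag in policy_asset_group_query:
+    >>>     print("Asset group name = {}. It has {} members".format(ag.name, ag.member_count))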
+
+Create an Asset Group
+---------------------
+
+The only required field when creating an asset group is the Asset Group Name.
+
+Creating a group without a policy assigned enables the use of a group for visibility of specific assets.
+After creation, it is possible to use any combination of assigning assets directly, adding a query or assigning
+a policy.
+
+    >>> new_asset_group = AssetGroup.create_group(api, "My Example Asset Group", description="Demonstrating the SDK")
+    >>> print(new_asset_group)
+    AssetGroup object, bound to https://defense.conferdeploy.net.
+    -------------------------------------------------------------------------------
+    create_time: 2024-01-24T05:47:34.378Z
+    description: Demonstrating the SDK
+    discovered: False
+    id: aae06712-96d4-43ea-ae67-07112d6f670e
+    member_count: 0
+    member_type: DEVICE
+    name: My Example Asset Group
+    org_key: ABCD1234
+    status: OK
+    update_time: 2024-01-24T05:47:34.378Z
+
+Now add a query which will dynamically include any asset with the Windows operating system, and a policy:
+
+    >>> new_asset_group.query = "os.equals:WINDOWS"
+    >>> new_asset_group.policy_id = 12345
+    >>> new_asset_group.save()
+
+Parts of Carbon Black Cloud have asynchronous processing and are eventually consistent.
+When writing automated scripts, use the status field to determine when the asset group membership has
+finished updating.
+
+* ``OK`` indicates the membership evaluation is complete
+* ``UPDATING`` indicates the group’s dynamic memberships are being re-evaluated
+
+    >>> import time
+    >>> while new_asset_group.status != "OK":
+    >>>     print("waiting")
+    >>>     time.sleep(5)
+    >>>     new_asset_group.refresh()
+
+Then print the new asset group:
+
+    >>> print("new_asset_group {}".format(new_asset_group))
+    new_asset_group, bound to https://defense.conferdeploy.net.
+    Last refreshed at Tue Jan 23 22:47:47 2024
+    -------------------------------------------------------------------------------
+    create_time: 2024-01-24T05:47:35.150Z
+    description: Demonstrating the SDK
+    discovered: False
+    id: ceb27e6c-7c23-4dd5-af7a-3b0c14363240
+    member_count: 204
+    member_type: DEVICE
+    name: My Example Asset Group
+    org_key: ABCD1234
+    policy_id: 12345
+    policy_name: DemoPolicy
+    query: os.equals:WINDOWS
+    status: OK
+    update_time: 2024-01-24T05:47:35.585Z
+    AssetGroup object, bound to https://defense.conferdeploy.net.
+
+
+All attributes can also be provided to the create method:
+
+    >>> second_asset_group = AssetGroup.create_group(api, "Second example group","Second group description",
+    ...     query = "os.equals:MAC", policy_id = 12345)
+
+The ``add_members()`` function is used to assign a device directly to the group, in contrast to dynamic membership,
+where the device is included because it matches the query on the asset group.
+
+    >>> from cbc_sdk.platform import Device
+    >>> random_device = api.select(Device).first()
+    >>> second_asset_group.add_members(random_device)
+
+Delete an Asset Group
+---------------------
+
+To delete an Asset Group, use the delete method:
+
+    >>> second_asset_group.delete()
+
+Preview Policy Rank Changes
+---------------------------
+
+The effective policy on a specific device is determined by the rank of policies the device is assigned, with higher
+ranked policies taking precedence.
+
+The `example script `_
+includes finding two policies that are likely to have impactful changes. This snippet uses hardcoded values so the
+focus is on the method being called and its output.
+
+The preview method is a static class method on Policy, since it is a policy change that is being previewed.
+ +The result is a :py:mod:`DevicePolicyChangePreview() ` class, +which contains information about all the device that would have a change in effective policy. + + >>> from cbc_sdk.platform import Policy + >>> api = CBCloudAPI(profile='sample') + >>> policy_id = 1234 + >>> # to get a policy that exists in your org: policy_id = api.select(Policy).first().id + >>> new_policy_position = 1 + >>> changes = Policy.preview_policy_rank_changes(api, [(policy_id, new_policy_position)]) + >>> print(changes[0]) + DevicePolicyChangePreview object, bound to https://defense.conferdeploy.net. + ------------------------------------------------------------------------------- + Current policy: #98765 at rank 7 + New policy: #1234 at rank 1 + Asset count: 264 + Asset query: ((-_exists_:ag_agg_key_manual AND ag_agg_key_dynamic:9b0a62b19086bdbfcff5c62e581304a28cd445aee86d87c6d95c57483ae5e05b AND policy_id:100714 AND policy_override:false) AND (os.equals: "WINDOWS")) + +This ``change`` says there's an asset group that is currently using policy id 98765 which is ranked 7. +If the change was processed the asset group would use a new policy, id 1234 which is at rank 1. This would affect 264 +Assets and the Asset query can be used to find those Assets. + +The Asset Query is a class of type ``DeviceSearchQuery`` which can be executed: + + >>> devices = changes[0].asset_query + >>> print("type of devices object is {}".format(type(devices))) + >>> print(len(devices)) + type of devices object is + 264 + +Preview Asset Group Changes +--------------------------- + +Previewing the changes that would happen if an asset group was changed is very similar to the Preview Policy Rank +Changes above. + +Once Asset Groups have been created and policies assigned, the preview asset group changes function can be used to +identify the devices that would have their group membership or effective policy impacted by creating or deleting an +Asset Group, or by changing the query on the asset group. + +Here we're working with a random asset group and policy, using the ``first()`` function. + +A new policy is assigned and the existing query is not changed. + + >>> asset_group = api.select(AssetGroup).first() + >>> policy_id = api.select(Policy).first().id + >>> api = CBCloudAPI(profile='sample') + >>> changes = AssetGroup.preview_update_asset_groups(api, [asset_group], policy_id, asset_group.query) + >>> print("There are {} changes that would result from the proposed change. The first change:".format(len(changes))) + >>> print(changes[0]) + DevicePolicyChangePreview object, bound to https://defense.conferdeploy.net. + ------------------------------------------------------------------------------- + Current policy: #148443 at rank 96 + New policy: #80947 at rank 1 + Asset count: 117 + Asset query: ((-_exists_:ag_agg_key_manual AND -_exists_:ag_agg_key_dynamic AND policy_id:148443 AND policy_override:false) AND (os.equals:MAC)) diff --git a/docs/authentication.rst b/docs/authentication.rst index 537749fa6..5405884f6 100644 --- a/docs/authentication.rst +++ b/docs/authentication.rst @@ -166,6 +166,8 @@ CBAPI, so older files can continue to be used. +-------------------------+---------+----------+ |``integration`` | | No | +-------------------------+---------+----------+ +|``default_timeout`` | 300000 | No | ++-------------------------+---------+----------+ **X-AUTH-TOKEN** specific fields @@ -194,14 +196,11 @@ CBAPI, so older files can continue to be used. 
+-------------------------+---------+----------+ - -Individual profiles or sections are delimited in the file by placing their name within square brackets: ``[profile_name]``. Within -each section, individual credential values are supplied in a ``keyword=value`` format. - +Individual profiles or sections are delimited in the file by placing their name within square brackets: +``[profile_name]``. Within each section, individual credential values are supplied in a ``keyword=value`` format. Unrecognized keywords are ignored. - By default, the CBC SDK looks for credentials files in the following locations: * The ``.carbonblack`` subdirectory of the current directory of the running process. @@ -267,6 +266,8 @@ be specified: +-------------------------+----------------+---------+----------+ |``integration`` | ``REG_SZ`` | | No | +-------------------------+----------------+---------+----------+ +|``default_timeout`` | ``REG_DWORD`` | 300000 | No | ++-------------------------+----------------+---------+----------+ **X-AUTH-TOKEN** specific fields @@ -323,22 +324,22 @@ Note the use of doubled backslashes to properly escape them under Python. With an External Credential Provider ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Credentials may also be supplied by writing a class that conforms to the ``CredentialProvider`` interface protocol. -When creating :py:mod:`CBCloudAPI `, pass a reference to a ``CredentialProvider`` object in the ``credential_provider`` keyword -parameter. Then pass the name of the profile you want to retrieve from the provider object using the keyword parameter -``profile``. +When creating :py:mod:`CBCloudAPI `, pass a reference to a ``CredentialProvider`` object +in the ``credential_provider`` keyword parameter. Then pass the name of the profile you want to retrieve from the +provider object using the keyword parameter ``profile``. **Example:** >>> provider = MyCredentialProvider() >>> cbc_api = CBCloudAPI(credential_provider=provider, profile='default') -Details of writing a credential provider may be found in the :doc:`Developing a Custom Credential Provider ` -document. +Details of writing a credential provider may be found in the +:doc:`Developing a Custom Credential Provider ` document. At Runtime ^^^^^^^^^^ -The credentials may be passed into the :py:mod:`CBCloudAPI ` object when it is created via the keyword parameters ``url``, -``token``, ``org_key``, and (optionally) ``ssl_verify`` and ``integration_name``. +The credentials may be passed into the :py:mod:`CBCloudAPI ` object when it is created +via the keyword parameters ``url``, ``token``, ``org_key``, and (optionally) ``ssl_verify`` and ``integration_name``. **Example:** @@ -348,7 +349,6 @@ The credentials may be passed into the :py:mod:`CBCloudAPI ` must be either ``None`` or left -unspecified. (The ``profile`` keyword parameter will be ignored.) - +``CBAPI_TOKEN``), and the ``credential_file`` keyword parameter to :py:mod:`CBCloudAPI ` +must be either ``None`` or left unspecified. (The ``profile`` keyword parameter will be ignored.) **N.B.:** Passing credentials via the environment can be insecure, and, if this method is used, a warning message to that effect will be generated in the log. @@ -374,12 +373,14 @@ we are going to use JSON to store our other entries, the JSON is going to be sto CLI tool(``/bin/set-macos-keychain.py``) or by manually creating it. The tool can: - * Automatically import all of your profiles set in the ``credentials.cbc`` file. Or by setting a custom path to a file. 
+ * Automatically import all of your profiles set in the ``credentials.cbc`` file. Or by setting a custom path + to a file. * Manually input the values of your credentials via prompt or by using system arguments. Find out how to use the script in its docstring or by using ``--help``. -You can remove the keys that you won't be using or leave them empty. Reference our :ref:`Explanation of API Credential Components`. +You can remove the keys that you won't be using or leave them empty. Reference our +:ref:`Explanation of API Credential Components`. .. code-block:: javascript @@ -393,7 +394,8 @@ You can remove the keys that you won't be using or leave them empty. Reference o "ssl_force_tls_1_2": true, "proxy": "", "ignore_system_proxy": true, - "integration": "" + "integration": "", + "default_timeout": 300000 } .. note:: @@ -424,16 +426,19 @@ With Amazon Secrets Manger Configure the AWS credentials ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A full and comprehensive guide configuring the files and credentials regarding AWS can be found in their `official documentation. `_ +A full and comprehensive guide configuring the files and credentials regarding AWS can be found in their +`official documentation. `_ Adding a secret to the AWS Secrets Manager ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -There is an official `guide for creating a secret `_ by AWS. +There is an official +`guide for creating a secret `_ +by AWS. .. note:: - Add your secrets as a key/value pairs. In the :ref:`Explanation of API Credential Components` you can find full information on required fields and their purpose. - + Add your secrets as a key/value pairs. In the :ref:`Explanation of API Credential Components` you can find full + information on required fields and their purpose. Using our credential provider for the SDK ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -450,7 +455,9 @@ the credential provider. AWS Single Sign-On Provider (SSO) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you wish to set the SSO provider follow this `tutorial `_ for setting the config. +If you wish to set the SSO provider follow this +`tutorial `_ +for setting the config. Then you can use the ``profile_name`` attribute in the ``AWSCredentialProvider`` like so: @@ -505,6 +512,10 @@ or :ref:`with Windows Registry `, the credentials include | | as part of the ``User-Agent:`` HTTP header on all | | | | | requests made by the SDK. | | | +-------------------------+------------------------------------------------------+---------+----------+ +|``default_timeout`` | The default timeout for search queries, specified in |300000 | No | +| | milliseconds. This value may never be greater than | | | +| | the default of 300000 milliseconds. | | | ++-------------------------+------------------------------------------------------+---------+----------+ **X-AUTH-TOKEN** specific fields diff --git a/docs/cbc_sdk.platform.rst b/docs/cbc_sdk.platform.rst index 459691698..e136d58b1 100644 --- a/docs/cbc_sdk.platform.rst +++ b/docs/cbc_sdk.platform.rst @@ -9,6 +9,9 @@ Base Module :inherited-members: :show-inheritance: +Submodules +---------- + Alerts Module ------------------------------- @@ -17,6 +20,14 @@ Alerts Module :inherited-members: :show-inheritance: +Asset Groups Module +------------------------------------- + +.. 
automodule:: cbc_sdk.platform.asset_groups + :members: + :inherited-members: + :show-inheritance: + Audit Module ------------------------------ @@ -97,6 +108,14 @@ RuleConfigs Module :inherited-members: :show-inheritance: +Previewer Module +-------------------------------------------- + +.. automodule:: cbc_sdk.platform.previewer + :members: + :inherited-members: + :show-inheritance: + Processes Module ---------------------------------- diff --git a/docs/changelog.rst b/docs/changelog.rst index 940d7cb18..ae9707f40 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -1,5 +1,50 @@ Changelog ================================ +CBC SDK 1.5.1 - Released January 30, 2024 +----------------------------------------- + +New Features: + +* Asset Groups - Added management of asset groups: + + * Create, delete, and update asset groups (either with manual or dynamic membership) + * Retrieve asset groups by ID + * Search for asset groups, retrieve list of all asset groups + * Add/remove members, get all members in a group + * Get statistics for a group + * Helper functions for ``Device`` to retrieve and maintain group membership + * Preview changes to effective policy for device(s) as a result of a number of different potential changes + * Full documentation and new Guide page + +* Alerts v7 Enhancements - Added additional functionality to Alerts v7 as implemented in version 1.5.0: + + * Search Grouped Alerts, including faceting and retrieval of all alerts for a group + * Get list of watchlists on an alert + * Network threat metadata helper function + * Full update to Alerts guide in documentation + +* Command line deobfuscation added to Processes, Alerts, and Observations, allowing visualization of PowerShell + command lines that have been deliberately obfuscated by attackers. +* New ``scroll()`` method added to Live Query search results. +* New helper methods added to ``Policy`` to enable or disable XDR data collection and auth event data collection. +* New ``export()`` and ``scroll()`` methods added to ``DeviceSearchQuery``. + +Updates: + +* Python 3.7 has been re-added as "unofficially" supported, since certain integrations that use the SDK still use it. +* Added ``deployment_type`` as part of the facets available in ``DeviceSearchQuery``. + +Bug Fixes: + +* Search jobs that allow setting a timeout now default that timeout to 5 minutes. The timeout may be lowered + from that point, but *never* raised beyond it. This eliminates a problem of "hung" searches. + +Documentation: + +* ReadTheDocs generation has been improved to show the inherited methods. There are some helper functions on + ``SearchQuery`` classes such as ``add_criteria()`` inherited from ``CriteriaBuilderSupportMixin`` and ``first()`` + inherited from ``IterableQueryMixin``. 
+ CBC SDK 1.5.0 - Released October 24, 2023 ----------------------------------------- diff --git a/docs/conf.py b/docs/conf.py index dd6336a9e..1cc03df75 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -23,7 +23,7 @@ author = 'Developer Relations' # The full version, including alpha/beta/rc tags -release = '1.5.0' +release = '1.5.1' # -- General configuration --------------------------------------------------- diff --git a/docs/differential-analysis.rst b/docs/differential-analysis.rst index 744103aaf..cbcb033c4 100644 --- a/docs/differential-analysis.rst +++ b/docs/differential-analysis.rst @@ -59,9 +59,9 @@ To receive the actual differential data, use the ``.count_only()`` method, as fe older_run_not_responded_devices: [list:1 item]: [0]: 17331059 -You can also access a dictionary representation of the response with the ``._info`` property. +You can also access a dictionary representation of the response with the ``.to_json()`` method. - >>> print(run._info) + >>> print(run.to_json()) {'diff_processed_time': 0.037, 'diff_results': [{'added_count': 1, 'change_count': 1, @@ -111,7 +111,7 @@ To use this method, append it to the rest of the Differential object query or co >>> query = cb.select(Differential).newer_run_id('jcdqsju4utpaayj5dh5r2llzffeolg0u').older_run_id('yhbg3wcea9y1l4asiltky5tupkgauzas') >>> actual_changes = query.count_only(False).set_device_ids([12345]) >>> run = actual_changes.submit() - >>> print(run._info) + >>> print(run.to_json()) {'diff_processed_time': 0.039, 'diff_results': [{'added_count': 1, 'change_count': 1, diff --git a/docs/guides.rst b/docs/guides.rst index da6581db0..dd19bcce5 100755 --- a/docs/guides.rst +++ b/docs/guides.rst @@ -12,6 +12,7 @@ In general, and unless otherwise indicated, these guides are directed at those t - Have a working knowledge of Python. - Have a basic understanding of what the Carbon Black Cloud does, and its basic terminology such as events, alerts, and watchlists. +- Need information to update to new versions of the SDK when enhanced features are released. Certain guides may be more geared towards audiences with more experience with the Carbon Black Cloud, such as administrators. @@ -26,6 +27,7 @@ Feature Guides searching alerts + asset-groups audit-log developing-credential-providers device-control @@ -43,12 +45,14 @@ Feature Guides * :doc:`searching` - Most operations in the SDK will require you to search for objects. * :doc:`alerts` - Work and manage different types of alerts such as CB Analytics Alert, Watchlist Alerts and Device Control Alerts. +* :doc:`asset-groups` - Create and modify Asset Groups, and preview the impact changes to policy ranking or asset group definition will have. * :doc:`alerts-migration` - Update from SDK 1.4.3 or earlier to SDK 1.5.0 or later to get the benefits of the Alerts v7 API. * :doc:`audit-log` - Retrieve audit log events indicating various "system" events. * :doc:`device-control` - Control the blocking of USB devices on endpoints. * :doc:`differential-analysis` - Provides the ability to compare and understand the changes between two Live Query runs * :doc:`live-query` - Live Query allows operators to ask questions of endpoints * :doc:`live-response` - Live Response allows security operators to collect information and take action on remote endpoints in real time. +* :doc:`notifications-migration` - Update from Notifications to Alerts in SDK 1.5.0 or later to get the benefits of the Alerts v7 API. 
* :doc:`policy` - Use policies to define and prioritize rules for how applications can behave on groups of assets
* :doc:`recommendations` - Work with Endpoint Standard recommendations for reputation override.
* :doc:`reputation-override` - Manage reputation overrides for known applications, IT tools or certs.
@@ -64,5 +68,6 @@ Migration Guides
   :maxdepth: 2

   alerts-migration
-   porting-guide
   live-response-v6-migration
+   notifications-migration
+   porting-guide
diff --git a/docs/live-query.rst b/docs/live-query.rst
index cae36ca0b..615014c0f 100755
--- a/docs/live-query.rst
+++ b/docs/live-query.rst
@@ -91,7 +91,7 @@ information for each. There is also a helper option to get the results:

    >>> results_by_helper = run.query_results()

-Other options
+Export results
-------------
It is possible to export the results in several formats including csv, zipped csv and streaming lines.
These options are documented in :meth:`cbc_sdk.audit_remediation.base.ResultQuery`
@@ -125,6 +125,34 @@ The sequence of calls are:

    >>> finished_job.get_output_as_file("/Users/myname/mydir/livequeryresults_async.csv")

+Scroll results
+--------------
+
+If you would like to ingest all of the Live Query results, whether from one Run or multiple Runs, consider using the
+scroll option to fetch the latest results. The scroll option is limited to the last 24 hours for results across all
+Runs. You need to specify either a time_received or a list of one or more Run ids.
+
+    >>> result_query = api.select(Result).set_time_received(range="-3h")
+    >>> list_results = result_query.scroll(10)
+    >>> print(f"num_remaining: {result_query.num_remaining}")
+    num_remaining: 35
+    >>> while result_query.num_remaining > 0:
+    >>>     list_results.extend(result_query.scroll(10))
+    >>> print(f"total results: {len(list_results)}")
+    total results: 45
+
+Alternatively, if you want to get all the results over multiple days for a single Run, use the Run's id:
+
+    >>> result_query = api.select(Result).set_run_ids([run.id])
+    >>> list_results = result_query.scroll(10)
+    >>> print(f"num_remaining: {result_query.num_remaining}")
+    num_remaining: 62
+    >>> while result_query.num_remaining > 0:
+    >>>     list_results.extend(result_query.scroll(10))
+    >>> print(f"total results: {len(list_results)}")
+    total results: 72
+
+
Clean up
---------
Since this is a tutorial we'll clean up when we're done by first stopping the run and then deleting it.
@@ -144,8 +172,8 @@ It will not be visible in the console and attempting to refresh the object will

    >>> run.delete()
    True

-A footnote on scheduled runs (templates)
-----------------------------------------
+Scheduled runs (templates)
+--------------------------
A template is a query that is scheduled to run periodically. It is likely easier to configured these using the
Carbon Black Cloud console, but retrieving the result for import to another system may be useful.
@@ -171,7 +199,3 @@ A where clause can be added to limit the templates returned.
Each time the sche
    name = CBC SDK Demo Template
    id = p7qtvxms0oaju46whcrfmyppa9fiqpn9
    Run id = huoobhistdtxxpzhmg52yns7wmsuvjyx, Run Status = ACTIVE, Run create time = 2022-01-19T21:00:00.000Z, Results Returned = 2333, Template Id = p7qtvxms0oaju46whcrfmyppa9fiqpn9
    Run id = bdygnd8jvpjdqjmatdsuqzopaxebquqb, Run Status = TIMED_OUT, Run create time = 2022-01-18T21:00:00.000Z, Results Returned = 2988, Template Id = p7qtvxms0oaju46whcrfmyppa9fiqpn9
-
-
-
-
diff --git a/docs/notifications-migration.rst b/docs/notifications-migration.rst
new file mode 100644
index 000000000..6db31e96e
--- /dev/null
+++ b/docs/notifications-migration.rst
@@ -0,0 +1,73 @@
+.. _notifications-migration-guide:
+
+Notifications to Alerts Migration
+=================================
+
+Use this guide to update from using ``get_notifications()``, which leverages the
+``/integrationServices/v3/notification`` API, to using Alerts in SDK v1.5.0 or higher with the Alerts v7 API.
+
+.. note::
+    The /integrationServices/v3/notification API is deprecated, and deactivation is planned for 31 October 2024.
+
+    The Access Level Type ``SIEM`` used to access the Notifications API is also deprecated. Deactivation of the legacy access level type ``SIEM`` is planned for 31 January 2025.
+
+    For more information about migrating from the API and alternative solutions, see
+    `IntegrationService notification v3 API Migration Guide `_
+
+
+The key differences between Notifications and Alerts are:
+
+* In Notifications, the criteria that defines when a notification is sent is defined in the Carbon Black Cloud console. When using the Alerts v7 API, the criteria is part of the API request.
+
+* Notifications work on a subscription-based principle and they require a SIEM authentication key. By using that key, you are subscribing to a certain criteria of alerts.
+
+* As the Notifications API is deprecated, new alert types such as Intrusion Detection System Alerts cannot be retrieved from the Notifications API.
+
+* The Notifications endpoint is a read-once queue whereas Alerts v7 is a search request. When calling the Alerts v7 API, the caller (your script) must manage state, keeping track of the timestamp of the last Alert retrieved and using that for the start timestamp on the next request. See the Alert Bulk Export guide for details on the polling algorithm, and the minimal sketch after the Resources list below.
+
+We recommend that customers evaluate the new fields that are available in the Alerts v7 API and supported in SDK 1.5.0 onwards
+to maximize the benefits from the new data. A lot of new metadata is included in the Alert record that can help simplify your integration. For example, if you were previously getting process information to enrich the command
+line, the process command line is now included in the Alert record.
+
+As of SDK 1.5.0, Notifications are deprecated but still functional; there has not been a breaking change.
+The underlying API will be deactivated on October 31, 2024, so you must move to Alerts in SDK 1.5.0 or newer, which uses the Alerts v7 API, or to the
+`Data Forwarder `_ with Alert Schema 2.x before then.
+
+Resources
+---------
+
+* `IntegrationServices Notification v3 API Migration `_
+* `Carbon Black Cloud Syslog Connector 2.0 `_
+* `Alert Bulk Export `_
+* `Alerts Migration Guide `_
+* `Alerts v7 Announcement `_
+* `Alert Search and Response Fields `_
+* SDK 1.5.0 Alert Example Script `alerts_common_scenarios.py in GitHub Examples `_.
+* Alerts Bulk Export Example Script `alerts_bulk_export.py in GitHub Examples `_.
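+
+Polling Sketch
+--------------
+
+The following minimal sketch illustrates the state-management pattern described above: keep the timestamp of the
+last poll and use it as the start of the next time window. It is illustrative only; the profile name, polling
+interval and printed fields are assumptions, and a production integration should follow the Alert Bulk Export guide.
+
+.. code-block:: python
+
+    >>> import time
+    >>> from datetime import datetime, timedelta, timezone
+    >>> from cbc_sdk import CBCloudAPI
+    >>> from cbc_sdk.platform import Alert
+    >>> api = CBCloudAPI(profile="sample")
+    >>> checkpoint = datetime.now(timezone.utc) - timedelta(minutes=5)
+    >>> for _ in range(3):  # a real integration would loop until stopped
+    ...     end = datetime.now(timezone.utc)
+    ...     alerts = api.select(Alert).set_time_range(start=checkpoint.isoformat(), end=end.isoformat())
+    ...     for alert in alerts:
+    ...         print(alert.id, alert.backend_update_timestamp)
+    ...     checkpoint = end  # persist this value between executions of the script
+    ...     time.sleep(60)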
+
+How to Update the SDK Usage
+---------------------------
+
+This screenshot shows the Notification configuration page in the Carbon Black Cloud console.
+
+.. image:: _static/cbc_platform_notification_edit.png
+    :alt: Editing a notification in the CBC Platform
+    :align: center
+
+You can replicate the settings shown in the screenshot by running the following search on Alerts:
+
+.. code-block:: python
+
+    >>> from cbc_sdk import CBCloudAPI
+    >>> from cbc_sdk.platform import Alert
+    >>> api = CBCloudAPI(profile="sample")
+    >>> alerts = api.select(Alert).set_minimum_severity(7).\
+    >>> add_criteria("type", ["CB_ANALYTICS", "DEVICE_CONTROL"]).\
+    >>> add_criteria("device_policy", "Standard")
+
+An Alert contains a lot more information than a Notification, and most of the fields are available for searching.
+
+The other modification required is that, where Notifications was a read-once queue, Alerts are retrieved using a search.
+An example script with the polling logic implemented is in the GitHub Repository, `alerts_bulk_export.py in GitHub Examples `_.
+
+There is also a guide to `Alert Bulk Export `_
+on the developer network with a detailed explanation of the logic.
diff --git a/docs/searching.rst b/docs/searching.rst
index 2a4e16f2e..d37735a23 100644
--- a/docs/searching.rst
+++ b/docs/searching.rst
@@ -346,6 +346,26 @@ search result weighted as per the criteria provided::

    >>> print(synchronous_result.ranges)
    [{'start': '2020-10-16T00:00:00Z', 'end': '2020-11-16T00:00:00Z', 'bucket_size': '+1DAY', 'field': 'device_timestamp', 'values': None}]

+Query Timeouts
+--------------
+
+Some search queries make use of a timeout value, specified in milliseconds, which may be set either through
+a ``timeout`` parameter to a method, or via a ``timeout()`` setter method on a query class. These timeouts follow a
+specific set of rules.
+
+The *absolute maximum* timeout value is 300,000 milliseconds (5 minutes). No search may have a timeout longer
+than this.
+
+An application may specify a *shorter* maximum timeout value for all searches by including it in the credentials,
+under the key name ``default_timeout``. This default timeout value may not be greater than the absolute maximum
+timeout. If this value is specified, no search may have a timeout longer than this value.
+
+This means that, for any given search, the timeout will be the *smallest* of these values (a short example follows
+the list below):
+
+* The value specified via a parameter to the search, if one was specified.
+* The value configured in the credentials, if one is so configured.
+* The absolute maximum timeout value, as defined above.
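+
+For example, the following sketch (which assumes a profile named "sample" whose credentials set ``default_timeout``
+to 60000) requests a 30 second timeout on an Observation search. A value larger than the configured default or the
+absolute maximum would not be honored, per the rules above.
+
+.. code-block:: python
+
+    >>> from cbc_sdk import CBCloudAPI
+    >>> from cbc_sdk.platform import Observation
+    >>> api = CBCloudAPI(profile="sample")
+    >>> observations = api.select(Observation).where("process_name:chrome.exe").timeout(30000)
+    >>> print(len(observations))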
+ Search Suggestions ------------------ diff --git a/examples/platform/alerts_common_scenarios.py b/examples/platform/alerts_common_scenarios.py index a3d17072d..971cf5997 100644 --- a/examples/platform/alerts_common_scenarios.py +++ b/examples/platform/alerts_common_scenarios.py @@ -25,7 +25,7 @@ import time import json from cbc_sdk import CBCloudAPI -from cbc_sdk.platform import Alert, WatchlistAlert +from cbc_sdk.platform import Alert, WatchlistAlert, GroupedAlert from cbc_sdk.platform import Device # To see the http requests being made, and the structure of the search requests enable debug logging @@ -113,6 +113,8 @@ def main(): # Device - device - READ: For Device Searches # Alerts - org.alerts.close - EXECUTE: # Alerts - org.alerts.notes - CREATE, READ, UPDATE, DELETE + # Alerts - ThreatMetadata - org.xdr.metadata - READ + # Background tasks - Status - jobs.status - READ: To get the job status when closing alerts api = CBCloudAPI(profile="YOUR_PROFILE_HERE") @@ -125,12 +127,13 @@ def main(): # start by specifying Alert as the type of object to search alert_query = api.select(Alert) + # add_criteria is used for all fields that are searchable arrays alert_query.add_criteria("device_os", "WINDOWS") # when the field is a single value, a set_xxx function is used. alert_query.set_minimum_severity(3) # and limit the time to the last day - alert_query.set_time_range(range="-1d") + alert_query.set_time_range(range="-10d") # rows default to 100, let's override that alert_query.set_rows(1000) # and I think that Watchlist alerts are really noisy, so I'm going to exclude them from the results @@ -189,8 +192,9 @@ def main(): # Contextual information around the Alert # Observations observation_list = alert.get_observations() - len(observation_list) # force the query execution - print("There are {} related observations".format(len(observation_list))) + if observation_list is not None: + len(observation_list) # force the query execution + print("There are {} related observations".format(len(observation_list))) # Which device was this alert on? 
device = api.select(Device, alert.device_id) @@ -209,6 +213,54 @@ def main(): print("This is the process for the watchlist alert") print(process) + # For watchlist alerts in particular sometimes we would like to know more obout the associated watchlists + print("This is the list of watchlist id name pairs for this alert:") + print(watchlist_alert.get("watchlists")) + + watchlist_objects = watchlist_alert.get_watchlist_objects() + print("These objects are associated with this alerts watchlists:") + for object in watchlist_objects: + print(object) + + # Run a Grouped Alert Search to group our alerts by threat_id + # Start by specifying a GroupedAlert as the type of object to search + grouped_alert_search_query = api.select(GroupedAlert) + # then much like our AlertSearchQuery define the search query + grouped_alert_search_query = grouped_alert_search_query.set_time_range(range="-10d")\ + .add_criteria("type", "WATCHLIST").set_minimum_severity(1) + # run the query to retrieve + grouped_alert_search_query.all() + # and iterate through our GroupAlert objects + print([group_alert for group_alert in grouped_alert_search_query]) + + # to retrieve only the first GroupAlert object + group_alert = grouped_alert_search_query.first() + # to view the most recent alert on the object + print(group_alert.most_recent_alert_) + + # to create the alert search query for a given group alert + alert_search_query = group_alert.get_alert_search_query() + print([alert for alert in alert_search_query]) + + # to convert an AlertSearchQuery to a GroupAlertSearchQuery, will not preserve sort order + group_alert_search_query = alert_search_query.set_group_by("threat_id") + + # to convert a GroupAlertSearchQuery to an AlertSearchQuery, will not preserve sort order + alert_search_query = group_alert_search_query.get_alert_search_query() + + # to create the facets on a grouped alert search query + grouped_alert_facets = group_alert_search_query.facets(["type", "THREAT_ID"], 0, True) + print(grouped_alert_facets) + + # to retrieve the Network Threat Metadata from an ids alert we first retrieve an ids alert + alert_query = api.select(Alert) + alert_query.add_criteria("type", "INTRUSION_DETECTION_SYSTEM").set_time_range(range="-6M") + ids_alert = alert_query.first() + + # then just call the get_network_threat_metadata + network_threat_metadata = ids_alert.get_network_threat_metadata() + print(network_threat_metadata) + if __name__ == "__main__": # Trap keyboard interrupts while running the script. diff --git a/examples/platform/asset_groups.py b/examples/platform/asset_groups.py new file mode 100644 index 000000000..a6565bb32 --- /dev/null +++ b/examples/platform/asset_groups.py @@ -0,0 +1,243 @@ +#!/usr/bin/env python +# ******************************************************* +# Copyright (c) VMware, Inc. 2020-2024. All Rights Reserved. +# SPDX-License-Identifier: MIT +# ******************************************************* +# * +# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT +# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN, +# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED +# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY, +# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE. + +""" +This example shows how to use the Asset Groups API and interact with related Policies and Devices. + +The SDK documentation is published on Read The Docs. An Asset Groups Guide is available there. 
+https://carbon-black-cloud-python-sdk.readthedocs.io + +""" + +import sys +import time +from cbc_sdk import CBCloudAPI +from cbc_sdk.platform import AssetGroup, Policy, Device + +# To see the http requests being made, and the structure of the search requests enable debug logging +# import logging +# logging.basicConfig(level=logging.DEBUG) + + +def demo_preview_policy_rank_change(api): + """Demonstrate previewing the changes to effective policies on assets if policy ranking is changed. + + Once Asset Groups have been created and policies assigned, the policy rank preview function can be used to determine + the impact of changing policy rankings. + This example finds the policy in the highest rank that has assets affected, and then moves it one position lower. + 1 is the highest rank. + """ + # Start by finding the highest ranked policy + print("\n\n Starting preview_policy_rank_change \n\n") + all_policies = list(api.select(Policy).all()) + policy_top_rank = None + policy_num_devices = 0 + + for policy in all_policies: + tmp_policy_num_devices = len(api.select(Device).set_policy_ids([policy.id])) + if tmp_policy_num_devices > 0: + if policy_top_rank is None: + policy_top_rank = policy + policy_num_devices = tmp_policy_num_devices + elif policy.position < policy_top_rank.position: + policy_top_rank = policy + policy_num_devices = tmp_policy_num_devices + + # This is the highest ranking policy that has devices associated. + # Since this is the highest ranked policy, it will be the effective policy for those assets. + print("Policy {} with id = {}, is at rank {} and the policy affects {} members". + format(policy_top_rank.name, policy_top_rank.id, policy_top_rank.position, policy_num_devices)) + + # We're going to preview the impacts of moving the policy one position down the ranking (1 is the top) + new_policy_position = policy_top_rank.position + 1 + + # preview what would change if the policy at the top position moved down one rank. + changes = Policy.preview_policy_rank_changes(api, [(policy_top_rank.id, new_policy_position)]) + print_changes(changes) + print("\n\n Finished preview_policy_rank_change \n\n") + + +def demo_preview_asset_group_changes(api): + """Show how to use the preview asset group function to understand the impact of changes such as changing a query + + Once Asset Groups have been created and policies assigned, the preview asset group changes function can be used to + identify the devices that would have their group membership or effective policy impacted. + """ + print("\n\n Starting preview_asset_group_change \n\n") + # Get an asset group to work with + asset_group_1 = api.select(AssetGroup).first() + # Get the top and second ranked policies + top_policy = None + second_policy = None + for policy in api.select(Policy): + if policy.position == 1: + top_policy = policy + if policy.position == 2: + second_policy = policy + + changes = None + # Preview the changes that would happen if the policy is changed to the top rank. + # In the case where it already has the top ranked policy, change it to the second ranked policy. + # Send in the exising query - not changing. 
+ if asset_group_1.policy_id is None or asset_group_1.policy_id != top_policy.id: + changes = AssetGroup.preview_update_asset_groups(api, [asset_group_1], top_policy.id, asset_group_1.query) + else: + changes = AssetGroup.preview_update_asset_groups(api, [asset_group_1], second_policy.id, asset_group_1.query) + print("Changes from setting a new policy on the asset group") + print_changes(changes) + + # Preview adding a member to a group. Note that if the device is already in the group, there will be no changes + device = api.select(Device).first() + changes = AssetGroup.preview_add_members_to_groups(api, [device.id], [asset_group_1]) + print("Changes from adding a device to the asset group") + print_changes(changes) + + # Preview the changes to devices if a new asset group is created + changes = AssetGroup.preview_create_asset_group(api, top_policy.id, "os.equals:MAC") + print("Changes from creating a new asset group") + print_changes(changes) + + changes = AssetGroup.preview_delete_asset_groups(api, [asset_group_1]) + print("Changes from deleting an asset group") + print_changes(changes) + + +def print_changes(changes): + """Iterate through the changes object and print the content with contextual information.""" + if len(changes) == 0: + print("No changes would occur.") + else: + print("There are {} changes that would result from the proposed change".format(len(changes))) + + for change_counter, change in enumerate(changes, 1): + print("printing change number {}".format(change_counter)) + print("{} assets will be affected.".format(change.asset_count)) + print("The assets affected are:") + assets_affected = change.asset_query.all() + for asset in assets_affected: + print("Asset Name: {} - Asset Id {}".format(asset.name, asset.id)) + print("\n The currently effective policy for those assets is: name: {}, id: {}". + format(change.current_policy.name, change.current_policy.id)) + print("\n The effective policy after the move would be: name: {}, id: {}". + format(change.new_policy.name, change.new_policy.id)) + print("\n\n") + + +def main(): + """This script demonstrates how to use Asset Groups in the SDK and common operations to link to related objects. + + This example does not use command line parsing in order to reduce complexity and focus on the SDK functions. + Review the Authentication section of the Read the Docs for information about Authentication in the SDK + https://carbon-black-cloud-python-sdk.readthedocs.io/en/latest/authentication/ + + This is written for clarity of explanation, not perfect coding practices. + """ + # CBCloudAPI is the connection to the cloud. It holds the credentials for connectivity. + # To execute this script, the profile must have an API key with the following permissions. + # If you are restricted in the actions you're allowed to perform, expect a 403 response for missing permissions. + # Permissions are set on Settings -> API Access -> Access Level and then assigned to an API Key + # GM - Group Management - group-management - CREATE, READ, UPDATE, DELETE: For managing asset groups + # Device - General Information - device - READ: For getting device information + # Policies - Policies - org.policies - READ: For viewing policy information and pre-viewing the impact of changes + # to policy ranking and asset groups. + + api = CBCloudAPI(profile="YOUR_PROFILE_HERE") + + # to get all asset groups, a static method is available on the AssetGroup class. 
+ # This is useful for listing the groups configured in your org + all_asset_groups = AssetGroup.get_all_groups(api) + for ag in all_asset_groups: + print("asset group name: {}, member count: {}".format(ag.name, ag.member_count)) + + # Create an asset group. The only mandatory field is the Asset Group Name. + # It can be created without a policy, which enables the use of group for visibility of specific assets + # It can be created without a query, which enables manual assignment of assets to the group later + new_asset_group = AssetGroup.create_group(api, "My Example Asset Group", description="Demonstrating the SDK") + print(new_asset_group) + + # Add a query. All assets that match this criteria will be dynamically added to the group + new_asset_group.query = "os.equals:WINDOWS" + # Assign a policy. All assets in the group may have this policy applied. If an asset is in more than one group, + # policy ranking determines which is the effective policy. + # Choosing the lowest ranked policy as this is the least likely to actually change the behaviour while experimenting + # with a script. + bottom_rank_policy = None + for p in api.select(Policy).all(): + if bottom_rank_policy is None or p.position > bottom_rank_policy.position: + bottom_rank_policy = p + new_asset_group.policy_id = bottom_rank_policy.id + new_asset_group.save() + print("\n\n new_asset_group {}".format(new_asset_group)) + # Clean up after ourselves and delete the asset group + new_asset_group.delete() + + # An asset group can also be created with a query and / or a policy included + print("\n\n Second asset group with policy and query") + second_name = "Second demo group" + second_asset_group = AssetGroup.create_group(api, second_name, "Second group description", + query="os.equals:MAC", policy_id=bottom_rank_policy.id) + second_asset_group.refresh() + # The system is asynchronous and eventually consistent. When writing automated scripts, use the status field to + # determine when the asset group membership has finished updating. + # OK indicates the membership evaluation is complete + # UPDATING indicates that group’s dynamic memberships are being re-evaluated + while second_asset_group.status != "OK": + print("waiting") + time.sleep(10) + second_asset_group.refresh() + # Asset groups can be searched + search_asset_group_query = api.select(AssetGroup).add_criteria("name", second_name).sort_by("name", "ASC") + for ag in search_asset_group_query: + print("\n\nAsset group name = {}. It has {} members".format(ag.name, ag.member_count)) + print("Policy assigned to the Asset Group is Name: {}, Id: {}".format(ag.policy_name, ag.policy_id)) + # These are the assets that are now part of the dynamic asset group + for d in ag.list_members(): + print("Device Name: {}, Id: {}".format(d.name, d.id)) + if d.policy_id == bottom_rank_policy.id: + print("The effective policy is from the asset group") + else: + print("This asset group does not determine the effective policy The effective policy is {} - {}" + .format(d.policy_id, d.policy_name)) + + # Asset groups can have members from a query, and also direct (manual) assignment. + # Assign a device directly to the second asset group + random_device = api.select(Device).first() + second_asset_group.add_members(random_device) + second_asset_group.refresh() + while second_asset_group.status != "OK": + print("waiting") + time.sleep(10) + second_asset_group.refresh() + # The number of assets in the group may not change, if the randomly selected one is already a member of that group. 
+ print("\n\nsecond_asset_group with device assigned {}".format(second_asset_group)) + # remove the device + second_asset_group.remove_members(random_device) + print(second_asset_group) + # Clean up after ourselves and delete the asset group + second_asset_group.delete() + + # Step into the method to see the steps to select a policy and preview the impact changing it's rank would have + demo_preview_policy_rank_change(api) + # Step into the method to see methods available to preview the impact changing things such as the assigned policy + # on an asset group or creating a new asset group would have. + demo_preview_asset_group_changes(api) + + print("The End") + + +if __name__ == "__main__": + # Trap keyboard interrupts while running the script. + try: + sys.exit(main()) + except KeyboardInterrupt: + print("\nKeyboard interrupt\n") + sys.exit(0) diff --git a/src/cbc_sdk/__init__.py b/src/cbc_sdk/__init__.py index 7d0444a4c..af0fe893a 100644 --- a/src/cbc_sdk/__init__.py +++ b/src/cbc_sdk/__init__.py @@ -3,8 +3,8 @@ __title__ = 'cbc_sdk' __author__ = 'Carbon Black Developer Network' __license__ = 'MIT' -__copyright__ = 'Copyright 2020-2023 VMware Carbon Black' -__version__ = '1.5.0' +__copyright__ = 'Copyright 2020-2024 VMware Carbon Black' +__version__ = '1.5.1' from .rest_api import CBCloudAPI from .cache import lru diff --git a/src/cbc_sdk/audit_remediation/base.py b/src/cbc_sdk/audit_remediation/base.py index 7c0cb5704..9a676407a 100644 --- a/src/cbc_sdk/audit_remediation/base.py +++ b/src/cbc_sdk/audit_remediation/base.py @@ -336,6 +336,15 @@ def __init__(self, cb, initial_data): else: self._metrics = Result.Metrics(cb, initial_data=None) + def to_json(self): + """ + Return a json object of the response. + + Returns: + dict: The raw json Result. + """ + return self._info + @property def device_(self): """Returns the reified `Result.Device` for this result.""" @@ -1080,6 +1089,8 @@ def __init__(self, doc_class, cb): self._sort = {} self._batch_size = 100 self._run_id = None + self.num_remaining = None + self._search_after = None def set_device_ids(self, device_ids): """ @@ -1197,7 +1208,7 @@ def run_id(self, run_id): Sets the run ID to query results for. Arguments: - run_id (int): The run ID to retrieve results for. + run_id (str): The run ID to retrieve results for. Returns: ResultQuery: ResultQuery object with specified run_id. @@ -1208,6 +1219,50 @@ def run_id(self, run_id): self._run_id = run_id return self + def set_run_ids(self, run_ids): + """ + Sets the run IDs to query results for. + + Note: + Only supported for scroll + + Arguments: + run_ids (list[str]): The run IDs to retrieve results for. + + Returns: + ResultQuery: ResultQuery object with specified run_id. + """ + self._criteria["run_id"] = run_ids + return self + + def set_time_received(self, start=None, end=None, range=None): + """ + Set the time received to query results for. + + Note: If you are using scroll you may only specify range, or start and end. range supports max of 24hrs + + Args: + start(str): Start time in ISO8601 UTC format + end(str): End time in ISO8601 UTC format + range(str): Relative time window using the following allowed time units y years, w weeks, d days, h hours, + m minutes, s seconds + + Returns: + ResultQuery: ResultQuery object with specified time_received. 
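+
+        Example:
+            >>> # A minimal sketch; the profile name, time window, and row count below are placeholders.
+            >>> api = CBCloudAPI(profile="sample")
+            >>> query = api.select(Result).set_time_received(range="-12h")
+            >>> results = query.scroll(1000)
+            >>> while results:
+            ...     print(len(results))
+            ...     results = query.scroll(1000)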
+        """
+        if (start or end) and range:
+            raise ApiError("You cannot specify both a fixed start/end timestamp and a range")
+
+        self._criteria["time_received"] = {}
+
+        if range:
+            self._criteria["time_received"]["range"] = range
+        else:
+            self._criteria["time_received"]["start"] = start
+            self._criteria["time_received"]["end"] = end
+
+        return self
+
     def _build_request(self, start, rows):
         """
         Creates the request body for an API call.
@@ -1302,6 +1357,50 @@ def _perform_query(self, start=0, rows=0):
                 still_querying = False
                 break
 
+    def scroll(self, rows=10000):
+        """
+        Iteratively fetch results across Live Query Runs or paginate all results beyond the 10k search limits.
+
+        To fetch the next set of results, repeatedly call the scroll function until
+        `ResultQuery.num_remaining == 0` or no results are returned.
+
+        Note: You must specify either a set_time_received or a set_run_ids on the query before using scroll
+
+        Args:
+            rows (int): The number of rows to fetch
+
+        Returns:
+            list[Result]: The list of results
+        """
+        if self.num_remaining == 0:
+            return []
+        elif rows > 10000:
+            rows = 10000
+
+        url = f"/livequery/v1/orgs/{self._cb.credentials.org_key}/runs/results/_scroll"
+
+        # Sort by time_received enforced
+        self._sort = {}
+
+        request = self._build_request(0, rows)
+        del request["start"]
+
+        if self._search_after is not None:
+            request["search_after"] = self._search_after
+
+        resp = self._cb.post_object(url, body=request)
+        resp_json = resp.json()
+
+        # Capture latest state
+        self.num_remaining = resp_json["num_remaining"]
+        self._search_after = resp_json["search_after"]
+
+        results = []
+        for item in resp_json["results"]:
+            results.append(self._doc_class(self._cb, item))
+
+        return results
+
     def _init_async_query(self):
         """
         Initialize an async query and return a context for running in the background. Optional.
@@ -1591,7 +1690,7 @@ def run_id(self, run_id):
         Sets the run ID to query results for.
 
         Arguments:
-            run_id (int): The run ID to retrieve results for.
+            run_id (str): The run ID to retrieve results for.
 
         Returns:
             FacetQuery: FacetQuery object with specified run_id.
diff --git a/src/cbc_sdk/base.py b/src/cbc_sdk/base.py
index 9142aa238..40c7f115b 100644
--- a/src/cbc_sdk/base.py
+++ b/src/cbc_sdk/base.py
@@ -60,9 +60,9 @@ def construct_include(loader, node):
     with open(filename, 'rb') as f:
         if extension in ('yaml', 'yml'):
             return yaml.load(f, SwaggerLoader)
-        elif extension in ('json', ):
+        elif extension in ('json', ):  # pragma: no cover
            return json.load(f)
-        else:
+        else:  # pragma: no cover
            return ''.join(f.readlines())
 
 
@@ -97,7 +97,7 @@ def __new__(mcs, name, bases, clsdict):
         class_docstr = clsdict.get('__doc__', None)
 
         if not class_docstr:
-            class_docstr = f"Represents a {name} object in the Carbon Black Cloud."
+            class_docstr = f"Represents a {name} object in the Carbon Black Cloud." 
# pragma: no cover need_header = True for field_name, field_info in iter(model_data.get("properties", {}).items()): docstring = field_info.get("description", None) @@ -235,7 +235,7 @@ def __get__(self, instance, instance_type=None): return ret or {} -class IsoDateTimeFieldDescriptor(FieldDescriptor): +class IsoDateTimeFieldDescriptor(FieldDescriptor): # pragma: no cover """Field descriptor for fields of 'iso-date-time' type.""" def __init__(self, field_name): """ @@ -272,7 +272,7 @@ def __set__(self, instance, value): super(IsoDateTimeFieldDescriptor, self).__set__(instance, parsed_date) -class EpochDateTimeFieldDescriptor(FieldDescriptor): +class EpochDateTimeFieldDescriptor(FieldDescriptor): # pragma: no cover """Field descriptor for fields of 'epoch-ms-date-time' type.""" def __init__(self, field_name, multiplier=1.0): """ @@ -318,7 +318,7 @@ def __set__(self, instance, value): super(EpochDateTimeFieldDescriptor, self).__set__(instance, new_value) -class ForeignKeyFieldDescriptor(FieldDescriptor): +class ForeignKeyFieldDescriptor(FieldDescriptor): # pragma: no cover """Field descriptor for fields that are foreign keys.""" def __init__(self, field_name, join_model, join_field=None): """ @@ -364,7 +364,7 @@ def __set__(self, instance, value): setattr(self, self.join_field, value) -class BinaryFieldDescriptor(FieldDescriptor): +class BinaryFieldDescriptor(FieldDescriptor): # pragma: no cover """Field descriptor for fields of 'byte' type.""" def __get__(self, instance, instance_type=None): """ @@ -502,11 +502,11 @@ def __getitem__(self, item): return self._info[item] # if we're still here, let's load the object if we haven't done so already. - if not self._full_init: + if not self._full_init: # pragma: no cover self._refresh() # try one more time. - if item in self._info: + if item in self._info: # pragma: no cover return self._info[item] else: raise AttributeError("'{0}' object has no attribute '{1}'".format(self.__class__.__name__, @@ -541,7 +541,7 @@ def get(self, attrname, default_val=None): """ return getattr(self, attrname, default_val) - def _set(self, attrname, new_value): + def _set(self, attrname, new_value): # pragma: no cover pass def refresh(self): @@ -582,7 +582,7 @@ def __repr__(self): if self._model_unique_id is not None: return "<%s.%s: id %s> @ %s" % (self.__class__.__module__, self.__class__.__name__, self._model_unique_id, self._cb.session.server) - else: + else: # pragma: no cover return "<%s.%s object at %s> @ %s" % (self.__class__.__module__, self.__class__.__name__, hex(id(self)), self._cb.session.server) @@ -618,7 +618,7 @@ def _str_stringize(cls, value): """ try: string_value = str(value) - except UnicodeDecodeError: + except UnicodeDecodeError: # pragma: no cover string_value = repr(value) if len(string_value) > NewBaseModel.MAX_VALUE_WIDTH: string_value = string_value[:NewBaseModel.MAX_VALUE_WIDTH - 3] + "..." @@ -762,7 +762,7 @@ def __str__(self): return "\n".join(lines) - def _join(self, join_cls, field_name): + def _join(self, join_cls, field_name): # pragma: no cover try: field_value = getattr(self, field_name) except AttributeError: @@ -773,11 +773,20 @@ def _join(self, join_cls, field_name): return self._cb.select(join_cls, field_value) + def to_json(self): + """ + Return a json object of the response. + + Returns: + Any: The response dictionary representation. + """ + return copy.deepcopy(self._info) + class UnrefreshableModel(NewBaseModel): """Represents a model that can't be refreshed, i.e. 
for which ``reset()`` is not a valid operation.""" - def refresh(self): + def refresh(self): # pragma: no cover """Reload this object from the server.""" raise ApiError("refresh() called on an unrefreshable model") @@ -802,7 +811,7 @@ def __setattr__(self, attrname, val): return propobj.fset(self, val) if not attrname.startswith("_") and attrname not in self.__class__._valid_fields: - if attrname in self._info: + if attrname in self._info: # pragma: no cover log.warning("Changing field not included in Swagger definition: {0:s}".format(attrname)) self._set(attrname, val) else: @@ -1028,7 +1037,7 @@ def all(self): Returns: list: List of query items """ - return self._perform_query() + return list(self._perform_query()) def first(self): """ @@ -1656,7 +1665,7 @@ def add_criteria(self, key, newlist): >>> query = api.select(Alert).add_criteria("type", "CB_ANALYTIC") """ if not isinstance(newlist, list): - if not isinstance(newlist, str) and not isinstance(newlist, int): + if not isinstance(newlist, str) and not isinstance(newlist, int) and not isinstance(newlist, bool): raise ApiError("Criteria value(s) must be a string, int or list of strings or ints. " f"{newlist} is a {type(newlist)}.") self._update_criteria(key, [newlist], overwrite=True) @@ -2087,8 +2096,8 @@ def __init__(self, cls, cb, query=None): self._query_token = None # whether self._total_results is a valid value self._count_valid = False - # seconds to wait for num_contacted == num_completed until timing out - self._timeout = 0 + # milliseconds to wait for num_contacted == num_completed until timing out + self._timeout = cb.credentials.default_timeout # whether the query timed-out self._timed_out = False # query body parameters @@ -2102,19 +2111,23 @@ def __init__(self, cls, cb, query=None): self._default_args = {} def timeout(self, msecs): - """Sets the timeout on an AsyncQuery. By default, there is no timeout. + """ + Sets the timeout on an AsyncQuery. Arguments: - msecs (int): Timeout duration, in milliseconds. + msecs (int): Timeout duration, in milliseconds. This value can never be greater than the configured + default timeout. If this is 0, the configured default timeout value is used. Returns: - Query (AsyncQuery): The Query object with new milliseconds - parameter. + Query (AsyncQuery): The Query object with new milliseconds parameter. Example: >>> cb.select(ProcessFacet).where(process_name="foo.exe").timeout(5000) """ - self._timeout = msecs + if msecs <= 0: + self._timeout = self._cb.credentials.default_timeout + else: + self._timeout = min(msecs, self._cb.credentials.default_timeout) return self def limit(self, limit): @@ -2308,6 +2321,7 @@ def _submit(self): self._submit_time = time.time() * 1000 def _still_querying(self): + assert self._timeout > 0 if not self._query_token: self._submit() @@ -2320,7 +2334,7 @@ def _still_querying(self): if searchers_contacted == 0: return True if searchers_completed < searchers_contacted: - if self._timeout != 0 and (time.time() * 1000) - self._submit_time > self._timeout: + if (time.time() * 1000) - self._submit_time > self._timeout: self._timed_out = True return False return True diff --git a/src/cbc_sdk/connection.py b/src/cbc_sdk/connection.py index 0d95b8779..72251be24 100644 --- a/src/cbc_sdk/connection.py +++ b/src/cbc_sdk/connection.py @@ -418,7 +418,7 @@ def __init__(self, *args, **kwargs): Uses the profile named 'default' when not specified. proxy_session (requests.session.Session): Proxy session to be used for cookie persistence, connection pooling, and configuration. 
Default is ``None`` (use the standard session). - timeout (float): The timeout to use for for API requests. Default is ``None`` (no timeout). + timeout (float): The timeout to use for API request connections. Default is ``None`` (no timeout). token (str): The API token to use when accessing the Carbon Black Cloud. url (str): The URL of the Carbon Black Cloud provider to use. """ diff --git a/src/cbc_sdk/credential_providers/registry_credential_provider.py b/src/cbc_sdk/credential_providers/registry_credential_provider.py index a1ca9570c..2d6c3b156 100755 --- a/src/cbc_sdk/credential_providers/registry_credential_provider.py +++ b/src/cbc_sdk/credential_providers/registry_credential_provider.py @@ -28,7 +28,7 @@ HKEY_LOCAL_MACHINE = winreg.HKEY_LOCAL_MACHINE OpenKey = winreg.OpenKey QueryValueEx = winreg.QueryValueEx -except ModuleNotFoundError: +except ModuleNotFoundError: # pragma: no cover HKEY_CURRENT_USER = object() HKEY_LOCAL_MACHINE = object() @@ -79,7 +79,7 @@ def _base_key(self): """ return HKEY_CURRENT_USER if self._userkey else HKEY_LOCAL_MACHINE - def _open_key(self, basekey, path): + def _open_key(self, basekey, path): # pragma: no cover """ Open a key for use. This is a "test point" intended to be monkeypatched. @@ -98,7 +98,7 @@ def _open_key(self, basekey, path): except OSError as e: raise CredentialError(f"Unable to open registry subkey: {path}") from e - def _read_value(self, key, value_name): + def _read_value(self, key, value_name): # pragma: no cover """ Read a value from the registry key specified. This is a "test point" intended to be monkeypatched. @@ -162,6 +162,27 @@ def _read_bool(self, key, value_name): return val[0] != 0 return None + def _read_int(self, key, value_name): + """ + Read an integer value from the registry key specified. + + Args: + key (PyHKEY): The key to read a value from. + value_name (str): The name of the value to be returned. + + Returns: + int: The value read in. May return None if the value was not found. + + Raises: + CredentialError: If there was an error reading the value, or if the value was of the wrong type. + """ + val = self._read_value(key, value_name) + if val: + if val[1] != REG_DWORD: + raise CredentialError(f"value '{value_name}` is not of integer type") + return val[0] + return None + def _read_credentials(self, key): """ Read in a complete credentials set from a registry key. @@ -181,6 +202,10 @@ def _read_credentials(self, key): value = self._read_bool(key, cv.name.lower()) if value is not None: input[cv] = value + elif cv.requires_integer_value(): + value = self._read_int(key, cv.name.lower()) + if value is not None: + input[cv] = value else: value = self._read_str(key, cv.name.lower()) if value is not None: diff --git a/src/cbc_sdk/credentials.py b/src/cbc_sdk/credentials.py index 863e3f804..e0aa304e8 100644 --- a/src/cbc_sdk/credentials.py +++ b/src/cbc_sdk/credentials.py @@ -39,6 +39,7 @@ class CredentialValue(Enum): CSP_OAUTH_APP_SECRET = auto() CSP_API_TOKEN = auto() CSP_URL_OVERRIDE = auto() + DEFAULT_TIMEOUT = auto() def requires_boolean_value(self): """ @@ -49,15 +50,30 @@ def requires_boolean_value(self): """ return self in _bool_valued_credentials + def requires_integer_value(self): + """ + Return whether or not this credential requires an integer value. + + Returns: + bool: True if the credential requires an integer value, False if not. 
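+
+        Example:
+            >>> # Illustrative: DEFAULT_TIMEOUT is the integer-valued credential introduced here.
+            >>> CredentialValue.DEFAULT_TIMEOUT.requires_integer_value()
+            True
+            >>> CredentialValue.TOKEN.requires_integer_value()
+            False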
+ """ + return self in _int_valued_credentials + # The credentials that have Boolean values _bool_valued_credentials = [CredentialValue.SSL_VERIFY, CredentialValue.SSL_VERIFY_HOSTNAME, CredentialValue.SSL_FORCE_TLS_1_2, CredentialValue.IGNORE_SYSTEM_PROXY] +# The credentials that have integer values +_int_valued_credentials = [CredentialValue.DEFAULT_TIMEOUT] + # The possible string values that translate to Boolean _bool_values = {"0": False, "no": False, "off": False, "false": False, "1": True, "yes": True, "on": True, "true": True} +# The maximum value that the default timeout may have under any circumstances. +MAX_DEFAULT_TIMEOUT = 300000 + # === THE CREDENTIALS DATA OBJECT === # @@ -89,7 +105,8 @@ def __init__(self, values=None): CredentialValue.CSP_OAUTH_APP_ID: None, CredentialValue.CSP_OAUTH_APP_SECRET: None, CredentialValue.CSP_API_TOKEN: None, - CredentialValue.CSP_URL_OVERRIDE: "https://console.cloud.vmware.com" + CredentialValue.CSP_URL_OVERRIDE: "https://console.cloud.vmware.com", + CredentialValue.DEFAULT_TIMEOUT: MAX_DEFAULT_TIMEOUT } if values is not None: for k in list(CredentialValue): @@ -98,6 +115,9 @@ def __init__(self, values=None): elif k.name.lower() in values: self._set_value(k, values[k.name.lower()]) + if self.get_value(CredentialValue.DEFAULT_TIMEOUT) > MAX_DEFAULT_TIMEOUT: + self._set_value(CredentialValue.DEFAULT_TIMEOUT, MAX_DEFAULT_TIMEOUT) + self._token_type = "UNKNOWN" if self.get_value(CredentialValue.TOKEN) is not None: self._token_type = "API_KEY" @@ -129,6 +149,16 @@ def _set_value(self, key, value): self._values[key] = _bool_values[value.lower()] else: raise CredentialError(f"Invalid boolean value '{value}' for credential '{key.name}'") + elif key.requires_integer_value(): + if isinstance(value, int): + self._values[key] = value + elif isinstance(value, str): + try: + self._values[key] = int(value) + except ValueError: + raise CredentialError(f"Invalid integer value '{value}' for credential '{key.name}'") + else: + raise CredentialError(f"Invalid integer value '{value}' for credential '{key.name}'") else: self._values[key] = value @@ -243,7 +273,7 @@ def get_token(self): class CredentialProvider: """The interface implemented by a credential provider.""" - def get_credentials(self, section=None): + def get_credentials(self, section=None): # pragma: no cover """ Return a Credentials object containing the configured credentials. diff --git a/src/cbc_sdk/endpoint_standard/base.py b/src/cbc_sdk/endpoint_standard/base.py index 0cfdf87f9..16651fd95 100644 --- a/src/cbc_sdk/endpoint_standard/base.py +++ b/src/cbc_sdk/endpoint_standard/base.py @@ -104,7 +104,7 @@ def __init__(self, cb, model_unique_id=None, initial_data=None, force_init=False force_init (bool): True to force object initialization. full_doc (bool): True to mark the object as fully initialized. """ - self._details_timeout = 0 + self._details_timeout = cb.credentials.default_timeout self._info = None if model_unique_id is not None and initial_data is None: enriched_event_future = cb.select(EnrichedEvent).where(event_id=model_unique_id).execute_async() @@ -130,14 +130,18 @@ def get_details(self, timeout=0, async_mode=False): """Requests detailed results. Args: - timeout (int): Event details request timeout in milliseconds. + timeout (int): Event details request timeout in milliseconds. This value can never be greater than + the configured default timeout. If this value is 0, the configured default timeout is used. async_mode (bool): True to request details in an asynchronous manner. 
        Note:
            - When using asynchronous mode, this method returns a python future.
              You can call result() on the future object to wait for completion and get the results.
        """
-        self._details_timeout = timeout
+        if timeout <= 0:
+            self._details_timeout = self._cb.credentials.default_timeout
+        else:
+            self._details_timeout = min(timeout, self._cb.credentials.default_timeout)
         if not self.event_id:
             raise ApiError("Trying to get event details on an invalid event_id")
         if async_mode:
@@ -147,6 +151,7 @@ def get_details(self, timeout=0, async_mode=False):
 
     def _get_detailed_results(self):
         """Actual search details implementation"""
+        assert self._details_timeout > 0
         args = {"event_ids": [self.event_id]}
         url = "/api/investigate/v2/orgs/{}/enriched_events/detail_jobs".format(self._cb.credentials.org_key)
         query_start = self._cb.post_object(url, body=args)
@@ -167,7 +172,7 @@ def _get_detailed_results(self):
                     time.sleep(.5)
                     continue
                 if searchers_completed < searchers_contacted:
-                    if self._details_timeout != 0 and (time.time() * 1000) - submit_time > self._details_timeout:
+                    if (time.time() * 1000) - submit_time > self._details_timeout:
                         timed_out = True
                         break
             else:
@@ -337,7 +342,7 @@ def __init__(self, doc_class, cb):
         super(EnrichedEventQuery, self).__init__(doc_class, cb)
         self._default_args["rows"] = self._batch_size
         self._query_token = None
-        self._timeout = 0
+        self._timeout = cb.credentials.default_timeout
         self._timed_out = False
         self._aggregation = False
         self._aggregation_field = None
@@ -383,16 +388,19 @@ def timeout(self, msecs):
        """Sets the timeout on a event query.
 
         Arguments:
-            msecs (int): Timeout duration, in milliseconds.
+            msecs (int): Timeout duration, in milliseconds. This value can never be greater than the configured
+                default timeout. If this value is 0, the configured default timeout is used.
 
         Returns:
-            Query (EnrichedEventQuery): The Query object with new milliseconds
-                parameter.
+            Query (EnrichedEventQuery): The Query object with new milliseconds parameter.
 
         Example:
        >>> cb.select(EnrichedEvent).where(process_name="foo.exe").timeout(5000)
         """
-        self._timeout = msecs
+        if msecs <= 0:
+            self._timeout = self._cb.credentials.default_timeout
+        else:
+            self._timeout = min(msecs, self._cb.credentials.default_timeout)
         return self
 
     def _submit(self):
@@ -412,6 +420,7 @@ def _submit(self):
         self._submit_time = time.time() * 1000
 
     def _still_querying(self):
+        assert self._timeout > 0
         if not self._query_token:
             self._submit()
 
@@ -429,7 +438,7 @@ def _still_querying(self):
             if searchers_contacted == 0:
                 return True
             if searchers_completed < searchers_contacted:
-                if self._timeout != 0 and (time.time() * 1000) - self._submit_time > self._timeout:
+                if (time.time() * 1000) - self._submit_time > self._timeout:
                     self._timed_out = True
                     return False
             return True
diff --git a/src/cbc_sdk/enterprise_edr/auth_events.py b/src/cbc_sdk/enterprise_edr/auth_events.py
index 4334b70bd..10836a46e 100755
--- a/src/cbc_sdk/enterprise_edr/auth_events.py
+++ b/src/cbc_sdk/enterprise_edr/auth_events.py
@@ -29,14 +29,7 @@ class AuthEvent(NewBaseModel):
     validation_url = "/api/investigate/v2/orgs/{}/auth_events/search_validation"
     swagger_meta_file = "enterprise_edr/models/auth_events.yaml"
 
-    def __init__(
-        self,
-        cb,
-        model_unique_id=None,
-        initial_data=None,
-        force_init=False,
-        full_doc=False,
-    ):
+    def __init__(self, cb, model_unique_id=None, initial_data=None, force_init=False, full_doc=False):
        """
        Initialize the AuthEvent object.
@@ -55,7 +48,7 @@ def __init__( >>> events = cb.select(AuthEvent).where("auth_username:SYSTEM") >>> print(*events) """ - self._details_timeout = 0 + self._details_timeout = cb.credentials.default_timeout self._info = None if model_unique_id is not None and initial_data is None: auth_events_future = ( @@ -105,7 +98,8 @@ def get_details(self, timeout=0, async_mode=False): """Requests detailed results. Args: - timeout (int): AuthEvent details request timeout in milliseconds. + timeout (int): AuthEvent details request timeout in milliseconds. This can never be greater than the + configured default timeout. If this is 0, the configured default timeout is used. async_mode (bool): True to request details in an asynchronous manner. Returns: @@ -121,7 +115,10 @@ def get_details(self, timeout=0, async_mode=False): >>> events = cb.select(AuthEvent).where(process_pid=2000) >>> print(events[0].get_details()) """ - self._details_timeout = timeout + if timeout <= 0 or timeout > self._cb.credentials.default_timeout: + self._details_timeout = self._cb.credentials.default_timeout + else: + self._details_timeout = timeout if not self.event_id: raise ApiError( "Trying to get auth_event details on an invalid auth_event_id" @@ -153,7 +150,8 @@ def _helper_get_details(cb, alert_id=None, event_ids=None, bulk=False, timeout=0 alert_id (str): An alert id to fetch associated auth_events event_ids (list): A list of auth_event ids to fetch bulk (bool): Whether it is a bulk request - timeout (int): AuthEvents details request timeout in milliseconds. + timeout (int): AuthEvents details request timeout in milliseconds. This can never be greater than the + configured default timeout. If this value is 0, the configured default timeout is used. Returns: AuthEvent or list(AuthEvent): if it is a bulk operation a list, otherwise AuthEvent @@ -161,6 +159,8 @@ def _helper_get_details(cb, alert_id=None, event_ids=None, bulk=False, timeout=0 Raises: ApiError: if cb is not instance of CBCloudAPI """ + if timeout <= 0 or timeout > cb.credentials.default_timeout: + timeout = cb.credentials.default_timeout if cb.__class__.__name__ != "CBCloudAPI": raise ApiError("cb argument should be instance of CBCloudAPI.") if (alert_id and event_ids) or not (alert_id or event_ids): @@ -189,7 +189,7 @@ def _helper_get_details(cb, alert_id=None, event_ids=None, bulk=False, timeout=0 time.sleep(0.5) continue if completed < contacted: - if timeout != 0 and (time.time() * 1000) - submit_time > timeout: + if (time.time() * 1000) - submit_time > timeout: timed_out = True break else: @@ -282,7 +282,8 @@ def bulk_get_details(cb, alert_id=None, event_ids=None, timeout=0): cb (CBCloudAPI): A reference to the CBCloudAPI object. alert_id (str): An alert id to fetch associated events event_ids (list): A list of event ids to fetch - timeout (int): AuthEvent details request timeout in milliseconds. + timeout (int): AuthEvent details request timeout in milliseconds. This can never be greater than the + configured default timeout. If this value is 0, the configured default timeout is used. Returns: list: list of Auth Events @@ -520,7 +521,7 @@ def __init__(self, doc_class, cb): super(AuthEventQuery, self).__init__(doc_class, cb) self._default_args["rows"] = self._batch_size self._query_token = None - self._timeout = 0 + self._timeout = cb.credentials.default_timeout self._timed_out = False def or_(self, **kwargs): @@ -563,18 +564,21 @@ def timeout(self, msecs): """Sets the timeout on a Auth Event query. Arguments: - msecs (int): Timeout duration, in milliseconds. 
+ msecs (int): Timeout duration, in milliseconds. This value can never be greater than the configured + default timeout. If this value is 0, the configured default timeout is used. Returns: - Query (AuthEventQuery): The Query object with new milliseconds - parameter. + Query (AuthEventQuery): The Query object with new milliseconds parameter. Example: >>> cb = CBCloudAPI(profile="example_profile") >>> events = cb.select(AuthEvent).where(process_name="chrome.exe").timeout(5000) >>> print(*events) """ - self._timeout = msecs + if msecs <= 0: + self._timeout = self._cb.credentials.default_timeout + else: + self._timeout = min(msecs, self._cb.credentials.default_timeout) return self def _submit(self): @@ -596,6 +600,7 @@ def _submit(self): def _still_querying(self): """Check whether there are still records to be collected.""" + assert self._timeout > 0 if not self._query_token: self._submit() @@ -613,7 +618,7 @@ def _still_querying(self): if contacted == 0: return True if completed < contacted: - if self._timeout != 0 and (time.time() * 1000) - self._submit_time > self._timeout: + if (time.time() * 1000) - self._submit_time > self._timeout: self._timed_out = True return False return True diff --git a/src/cbc_sdk/platform/__init__.py b/src/cbc_sdk/platform/__init__.py index 255c07711..e7dc9c003 100644 --- a/src/cbc_sdk/platform/__init__.py +++ b/src/cbc_sdk/platform/__init__.py @@ -3,12 +3,15 @@ from cbc_sdk.platform.base import PlatformModel from cbc_sdk.platform.alerts import (Alert, WatchlistAlert, CBAnalyticsAlert, DeviceControlAlert, - ContainerRuntimeAlert, HostBasedFirewallAlert, IntrusionDetectionSystemAlert) + ContainerRuntimeAlert, HostBasedFirewallAlert, IntrusionDetectionSystemAlert, + GroupedAlert) from cbc_sdk.platform.alerts import Alert as BaseAlert from cbc_sdk.platform.audit import AuditLog +from cbc_sdk.platform.asset_groups import AssetGroup + from cbc_sdk.platform.devices import Device, DeviceFacet, DeviceSearchQuery from cbc_sdk.platform.events import Event, EventFacet @@ -17,6 +20,8 @@ from cbc_sdk.platform.policy_ruleconfigs import PolicyRuleConfig +from cbc_sdk.platform.previewer import DevicePolicyChangePreview + from cbc_sdk.platform.processes import (Process, ProcessFacet, AsyncProcessQuery, SummaryQuery) diff --git a/src/cbc_sdk/platform/alerts.py b/src/cbc_sdk/platform/alerts.py index 47a4043c2..d66aaf26c 100644 --- a/src/cbc_sdk/platform/alerts.py +++ b/src/cbc_sdk/platform/alerts.py @@ -28,12 +28,15 @@ from cbc_sdk.platform.processes import AsyncProcessQuery, Process from cbc_sdk.platform.legacy_alerts import LegacyAlertSearchQueryCriterionMixin from cbc_sdk.platform.jobs import Job +from cbc_sdk.platform.network_threat_metadata import NetworkThreatMetadata +from cbc_sdk.enterprise_edr.threat_intelligence import Watchlist from backports._datetime_fromisoformat import datetime_fromisoformat """Alert Models""" MAX_RESULTS_LIMIT = 10000 +REQUEST_IGNORED_KEYS = ["_doc_class", "_cb", "_count_valid", "_total_results", "_query_builder", "_sortcriteria"] class Alert(PlatformModel): @@ -519,6 +522,20 @@ def workflow_(self): """ return self.workflow + def deobfuscate_cmdline(self): + """ + Deobfuscates the command line of the process pointed to by the alert and returns the deobfuscated result. + + Required Permissions: + script.deobfuscation(EXECUTE) + + Returns: + dict: A dict containing information about the obfuscated command line, including the deobfuscated result. 
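+
+        Example:
+            >>> # A minimal sketch; the alert ID below is a placeholder.
+            >>> alert = cb.select(Alert, "SAMPLE-ALERT-ID")
+            >>> deobfuscation = alert.deobfuscate_cmdline()
+            >>> print(deobfuscation)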
+ """ + body = {"input": self.process_cmdline} + result = self._cb.post_object(f"/tau/v2/orgs/{self._cb.credentials.org_key}/reveal", body) + return result.json() + def close(self, closure_reason=None, determination=None, note=None): """ Closes this alert. @@ -790,6 +807,23 @@ def _query_implementation(cls, cb, **kwargs): """ return AlertSearchQuery(cls, cb).add_criteria("type", ["WATCHLIST"]) + def get_watchlist_objects(self): + """ + Returns the list of associated watchlist objects for the associated watchlist alert. + + Example: + >>> watchlist_alert = cb.select(Alert, "f643d11f-59ab-478f-92c3-4198ca9b8230") + >>> watchlist_objects = watchlist_alert.get_watchlist_objects() + + Returns: + list[Watchlist]: A list of Watchlist objects. + """ + watchlist_objects = [] + for watchlist in self.get("watchlists"): + watchlist_id = watchlist.get("id") + watchlist_objects.append(self._cb.select(Watchlist, watchlist_id)) + return watchlist_objects + class CBAnalyticsAlert(Alert): """Represents CB Analytics alerts.""" @@ -920,6 +954,108 @@ def _query_implementation(cls, cb, **kwargs): """ return AlertSearchQuery(cls, cb).add_criteria("type", ["INTRUSION_DETECTION_SYSTEM"]) + def get_network_threat_metadata(self): + """ + The NetworkThreatMetadata associated with this IDS alert if it exists. + + Example: + >>> alert_threat_metadata = ids_alert.get_network_threat_metadata() + + Returns: + NetworkThreatMetadata: The NetworkThreatMetadata associated with this IDS alert. + """ + tms_rule_id = self.get("tms_rule_id") + if tms_rule_id: + return self._cb.select(NetworkThreatMetadata, tms_rule_id) + return None + + +class GroupedAlert(PlatformModel): + """Represents Grouped alerts.""" + urlobject = "/api/alerts/v7/orgs/{0}/grouped_alerts" + swagger_meta_file = "platform/models/grouped_alert.yaml" + + def __init__(self, cb, model_unique_id, initial_data=None): + """ + Initialize the Grouped Alert object. + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + model_unique_id (str): ID of the alert represented. + initial_data (dict): Initial data used to populate the alert. + """ + super(GroupedAlert, self).__init__(cb, model_unique_id, initial_data) + self._most_recent_alert = None + self._request = None + + most_recent_alert = initial_data["most_recent_alert"] + if "type" in most_recent_alert: + if most_recent_alert["type"] == "CB_ANALYTICS": + self._most_recent_alert = CBAnalyticsAlert(cb, most_recent_alert["id"], most_recent_alert) + elif most_recent_alert["type"] == "WATCHLIST": + self._most_recent_alert = WatchlistAlert(cb, most_recent_alert["id"], most_recent_alert) + elif most_recent_alert["type"] == "INTRUSION_DETECTION_SYSTEM": + self._most_recent_alert = IntrusionDetectionSystemAlert(cb, most_recent_alert["id"], most_recent_alert) + elif most_recent_alert["type"] == "DEVICE_CONTROL": + self._most_recent_alert = DeviceControlAlert(cb, most_recent_alert["id"], most_recent_alert) + elif most_recent_alert["type"] == "HOST_BASED_FIREWALL": + self._most_recent_alert = HostBasedFirewallAlert(cb, most_recent_alert["id"], most_recent_alert) + elif most_recent_alert["type"] == "CONTAINER_RUNTIME": + self._most_recent_alert = ContainerRuntimeAlert(cb, most_recent_alert["id"], most_recent_alert) + else: + self._most_recent_alert = Alert(cb, most_recent_alert["id"], most_recent_alert) + + @classmethod + def _query_implementation(cls, cb, **kwargs): + """ + Returns the appropriate query object for this alert type. 
+
+        Args:
+            cb (BaseAPI): Reference to API object used to communicate with the server.
+            **kwargs (dict): Not used, retained for compatibility.
+
+        Returns:
+            GroupedAlertSearchQuery: The query object for this alert type.
+        """
+        return GroupedAlertSearchQuery(cls, cb)
+
+    @property
+    def most_recent_alert_(self):
+        """
+        Returns the most recent alert for a given group alert.
+
+        Returns:
+            Alert: the most recent alert in the Group Alert.
+        """
+        return self._most_recent_alert
+
+    def get_alert_search_query(self):
+        """
+        Returns the Alert Search Query needed to pull all alerts for a given Group Alert.
+
+        Returns:
+            AlertSearchQuery: for all alerts associated with the calling group alert.
+
+        Note:
+            Does not preserve sort criterion
+        """
+        alert_search_query = self._cb.select(Alert)
+        for key, value in vars(alert_search_query).items():
+            if hasattr(self._request, key) and key not in REQUEST_IGNORED_KEYS:
+                setattr(alert_search_query, key, self._request.__getattribute__(key))
+
+        alert_search_query.add_criteria(self._request._group_by.lower(), self.most_recent_alert["threat_id"])
+        return alert_search_query
+
+    def get_alerts(self):
+        """
+        Returns all alerts for a given Group Alert.
+
+        Returns:
+            list: alerts associated with the calling group alert.
+        """
+        return self.get_alert_search_query().all()
+
 
 """Alert Queries"""
 
@@ -947,6 +1083,7 @@ def __init__(self, doc_class, cb):
         self._query_builder = QueryBuilder()
         self._criteria = {}
         self._time_filters = {}
+        self._time_range = {}
         self._exclusions = {}
         self._time_exclusion_filters = {}
         self._sortcriteria = {}
@@ -1029,7 +1166,6 @@ def set_time_range(self, *args, **kwargs):
         else:
             # everything before this is only for backwards compatibility, once v6 deprecates all the other
             # checks can be removed
-            self._time_range = {}
             self._time_range = time_filter
         return self
 
@@ -1213,7 +1349,7 @@ def _build_request(self, from_row, max_rows, add_sort=True):
             request["query"] = query
 
         request["rows"] = self._batch_size
-        if hasattr(self, "_time_range"):
+        if self._time_range != {}:
             request["time_range"] = self._time_range
         if from_row > 1:
             request["start"] = from_row
@@ -1496,3 +1632,184 @@ def set_remote_is_private(self, is_private, exclude=False):
         else:
             self._exclusions["remote_is_private"] = is_private
         return self
+
+    def set_group_by(self, field):
+        """
+        Converts the AlertSearchQuery to a GroupedAlertSearchQuery grouped by the given field.
+
+        Args:
+            field (string): The field to group by, defaults to "threat_id"
+
+        Returns:
+            GroupedAlertSearchQuery: The converted query object.
+
+        Note:
+            Does not preserve sort criterion
+        """
+        grouped_alert_search_query = self._cb.select(GroupedAlert)
+        for key, value in vars(grouped_alert_search_query).items():
+            if hasattr(self, key) and key not in REQUEST_IGNORED_KEYS:
+                setattr(grouped_alert_search_query, key, self.__getattribute__(key))
+        grouped_alert_search_query.set_group_by(field)
+
+        return grouped_alert_search_query
+
+
+class GroupedAlertSearchQuery(AlertSearchQuery):
+    """Represents a query that is used to group Alert objects by a given field."""
+    def __init__(self, *args, **kwargs):
+        """Initialize the GroupedAlertSearchQuery."""
+        super().__init__(*args, **kwargs)
+        self._group_by = "THREAT_ID"
+
+    def set_group_by(self, field):
+        """
+        Sets the 'group_by' query body parameter, determining which field to group the alerts by.
+
+        Args:
+            field (string): The field to group by
+
+        Returns:
+            GroupedAlertSearchQuery: This instance.
+        """
+        self._group_by = field
+        return self
+
+    def _build_request(self, from_row, max_rows, add_sort=True):
+        """
+        Creates the request body for an API call.
+ + Args: + from_row (int): The row to start the query at. + max_rows (int): The maximum number of rows to be returned. + add_sort (bool): If True(default), the sort criteria will be added as part of the request. + + Returns: + dict: The complete request body. + """ + request = super(GroupedAlertSearchQuery, self)._build_request(from_row, max_rows, add_sort=True) + request["group_by"] = {"field": self._group_by} + + return request + + def get_alert_search_query(self): + """ + Converts the GroupedAlertSearchQuery into a nongrouped AlertSearchQuery + + Returns: AlertSearchQuery + + Note: Does not preserve sort criterion + """ + alert_search_query = self._cb.select(Alert) + for key, value in vars(alert_search_query).items(): + if hasattr(self, key) and key not in REQUEST_IGNORED_KEYS: + setattr(alert_search_query, key, self.__getattribute__(key)) + + return alert_search_query + + def _perform_query(self, from_row=1, max_rows=-1): + """ + Performs the query and returns the results of the query in an iterable fashion. + + Args: + from_row (int): The row to start the query at (default 1). + max_rows (int): The maximum number of rows to be returned (default -1, meaning "all"). + + Returns: + Iterable: The iterated query. + """ + url = self._build_url("/_search") + current = from_row + numrows = 0 + still_querying = True + while still_querying: + request = self._build_request(current, max_rows) + resp = self._cb.post_object(url, body=request) + result = resp.json() + + self._total_results = result["num_found"] + self._group_by_total_count = result["group_by_total_count"] + + # Prevent 500 Internal Server Error from retrieving behind MAX_RESULTS_LIMIT + if self._total_results > MAX_RESULTS_LIMIT: + self._total_results = MAX_RESULTS_LIMIT + self._count_valid = True + + results = result.get("results", []) + for item in results: + grouped_alert = self._doc_class(self._cb, None, item) + grouped_alert._request = self + yield grouped_alert + current += 1 + numrows += 1 + + if max_rows > 0 and numrows == max_rows: + still_querying = False + break + + from_row = current + if current >= self._total_results: + still_querying = False + break + + def close(self, closure_reason=None, determination=None, note=None): + """ + Closing all alerts matching a grouped alert query is not implemented. + + Note: + - Closing all alerts in all groups returned by a ``GroupedAlertSearchQuery`` can be done by + getting the ``AlertSearchQuery`` and using close() on it as shown in the following example. + + Example: + >>> alert_query = grouped_alert_query.get_alert_search_query() + >>> alert_query.close(closure_reason, determination, note) + """ + raise NotImplementedError("this method is not implemented") + + def update(self, status, closure_reason=None, determination=None, note=None): + """ + Updating all alerts matching a grouped alert query is not implemented. + + Note: + - Updating all alerts in all groups returned by a ``GroupedAlertSearchQuery`` can be done by + getting the ``AlertSearchQuery`` and using update() on it as shown in the following example. + + Example: + >>> alert_query = grouped_alert_query.get_alert_search_query() + >>> job = alert_query.update("IN_PROGESS", "NO_REASON", "NONE", "Starting Investigation") + >>> completed_job = job.await_completion().result() + """ + raise NotImplementedError("this method is not implemented") + + def facets(self, fieldlist, max_rows=0, filter_values=False): + """ + Return information about the facets for this alert by search, using the defined criteria. 
+
+        Args:
+            fieldlist (list): List of facet field names.
+            max_rows (int): The maximum number of rows to return. 0 means return all rows.
+            filter_values (boolean): A flag to indicate whether any filters on a term should be applied to facet
+                calculation. When false (default), a filter on the term is ignored while calculating facets
+
+        Returns:
+            list: A list of facet information specified as dicts.
+
+        Raises:
+            FunctionalityDecommissioned: If the requested attribute is no longer available.
+            ApiError: If the facet field is not valid
+        """
+        for field in fieldlist:
+            if field in GroupedAlertSearchQuery.DEPRECATED_FACET_FIELDS:
+                raise FunctionalityDecommissioned(
+                    "Field '{0}' is not a valid facet name because it was deprecated in "
+                    "Alerts v7.".format(field))
+
+        request = self._build_request(0, max_rows, False)
+        del request['rows']
+        request["terms"] = {"fields": fieldlist, "rows": max_rows}
+        request["filter_values"] = filter_values
+
+        url = self._build_url("/_facet")
+        resp = self._cb.post_object(url, body=request)
+        if resp.status_code == 400:
+            raise ApiError(resp.json())
+        result = resp.json()
+        return result.get("results", [])
diff --git a/src/cbc_sdk/platform/asset_groups.py b/src/cbc_sdk/platform/asset_groups.py
new file mode 100644
index 000000000..144190abb
--- /dev/null
+++ b/src/cbc_sdk/platform/asset_groups.py
@@ -0,0 +1,695 @@
+#!/usr/bin/env python3
+
+# *******************************************************
+# Copyright (c) VMware, Inc. 2020-2024. All Rights Reserved.
+# SPDX-License-Identifier: MIT
+# *******************************************************
+# *
+# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
+# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
+# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
+# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
+# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
+
+"""
+The model and query classes for referencing asset groups.
+
+An *asset group* represents a group of devices (endpoints, VM workloads, and/or VDIs) that can have a single policy
+applied to it so the protections of all similar assets are synchronized with one another. Policies carry a "position"
+value as one of their attributes, so that, between the policy attached directly to the device, and the policies
+attached to any asset groups the device is a member of, the one with the highest "position" is the one that applies to
+that device. Devices may be added to an asset group either explicitly, or implicitly by specifying a query on the
+asset group, such that all devices matching that search criteria are considered part of the asset group.
+
+Typical usage example::
+
+    # assume "cb" is an instance of CBCloudAPI
+    query = cb.select(AssetGroup).where('name:"HQ Devices"')
+    group = query.first()
+"""
+
+from cbc_sdk.base import (MutableBaseModel, BaseQuery, QueryBuilder, QueryBuilderSupportMixin, IterableQueryMixin,
+                          CriteriaBuilderSupportMixin, AsyncQueryMixin)
+from cbc_sdk.errors import ApiError
+from cbc_sdk.platform.devices import Device, DeviceSearchQuery
+from cbc_sdk.platform.previewer import DevicePolicyChangePreview
+
+
+class AssetGroup(MutableBaseModel):
+    """
+    Represents an asset group within the current organization in the Carbon Black Cloud.
+
+    ``AssetGroup`` objects are typically located via a search (using ``AssetGroupQuery``) before they can be operated
+    on. 
They may also be created on the Carbon Black Cloud by using the ``create_group()`` class method. + """ + urlobject = "/asset_groups/v1/orgs/{0}/groups" + urlobject_single = "/asset_groups/v1/orgs/{0}/groups/{1}" + primary_key = "id" + swagger_meta_file = "platform/models/asset_group.yaml" + + """The valid values for the 'filter' parameter to list_members().""" + VALID_MEMBER_FILTERS = ("ALL", "DYNAMIC", "MANUAL") + + def __init__(self, cb, model_unique_id=None, initial_data=None, force_init=False, full_doc=False): + """ + Initialize the ``AssetGroup`` object. + + Required Permissions: + group-management(READ) + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + model_unique_id (int): ID of the policy. + initial_data (dict): Initial data used to populate the policy. + force_init (bool): If True, forces the object to be refreshed after constructing. Default False. + full_doc (bool): If True, object is considered "fully" initialized. Default False. + """ + super(AssetGroup, self).__init__(cb, model_unique_id=model_unique_id, initial_data=initial_data, + force_init=force_init if initial_data else True, full_doc=full_doc) + if model_unique_id is None: + self.touch(True) + + def _build_api_request_uri(self, http_method="GET"): + """ + Create the URL to be used to access instances of AssetGroup. + + Args: + http_method (str): Unused. + + Returns: + str: The actual URL + """ + uri = AssetGroup.urlobject.format(self._cb.credentials.org_key) + if self._model_unique_id is not None: + return f"{uri}/{self._model_unique_id}" + return uri + + @classmethod + def _query_implementation(cls, cb, **kwargs): + """ + Returns the appropriate query object for the asset group type. + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + **kwargs (dict): Not used, retained for compatibility. + + Returns: + AssetGroupQuery: The query object for the asset group type. + """ + return AssetGroupQuery(cls, cb) + + def list_member_ids(self, rows=20, start=0): + """ + Gets a list of all member IDs in the group, optionally constrained by membership type. + + Required Permissions: + group-management(READ) + + Args: + rows (int): Maximum number of rows to retrieve from the server. The function may return fewer member IDs + if filtering is applied to the output. Default is 20. + start (int): Starting row to retrieve from the server; used to implement pagination. Default is 0. + + Returns: + list[dict]: List of dictionaries that contain the integer element ``external_member_id`` for the device ID, + the boolean element ``dynamic`` which is ``True`` if the group member is there due to the + group's dynamic query, and the boolean element ``manual`` which is ``True`` if the group member + was manually added. (It is possible for both ``dynamic`` and ``manual`` to be ``True``.) + """ + query_params = {"rows": rows, "start": start} + member_data = self._cb.get_object(self._build_api_request_uri() + "/members", query_params) + return [{"external_member_id": int(m["external_member_id"]), "dynamic": m["dynamic"], "manual": m["manual"]} + for m in member_data["members"]] + + def list_members(self, rows=20, start=0, membership="ALL"): + """ + Gets a list of all member devices in the group, optionally constrained by membership type. + + Required Permissions: + group-management(READ), devices(READ) + + Args: + rows (int): Maximum number of rows to retrieve from the server. The function may return fewer member IDs + if filtering is applied to the output. Default is 20. 
+ start (int): Starting row to retrieve from the server; used to implement pagination. Default is 0. + membership (str): Can restrict the types of members that are returned by this method. Values are "ALL" + to return all members, "DYNAMIC" to return only members that were added via the asset + group query, or "MANUAL" to return only manually-added members. Default is "ALL". + + Returns: + list[Device]: List of ``Device`` objects comprising the membership of the group.`` + """ + if membership not in AssetGroup.VALID_MEMBER_FILTERS: + raise ApiError(f"invalid filter value: {membership}") + id_list = self.list_member_ids(rows, start) + if membership == "ALL": + return [self._cb.select(Device, m["external_member_id"]) for m in id_list] + elif membership == "DYNAMIC": + return [self._cb.select(Device, m["external_member_id"]) for m in id_list if m["dynamic"]] + elif membership == "MANUAL": + return [self._cb.select(Device, m["external_member_id"]) for m in id_list if m["manual"]] + + def add_members(self, members): + """ + Adds additional members to this asset group. + + Required Permissions: + group-management(CREATE) + + Args: + members (int, Device, or list): The members to be added to the group. This may be an integer device ID, + a ``Device`` object, or a list of either integers or ``Device`` objects. + """ + member_ids = [] + if isinstance(members, int): + member_ids = [str(members)] + elif isinstance(members, Device): + member_ids = [str(members.id)] + else: + for m in members: + if isinstance(m, int): + member_ids.append(str(m)) + elif isinstance(m, Device): + member_ids.append(str(m.id)) + if len(member_ids) > 0: + self._cb.post_object(self._build_api_request_uri() + "/members", + {"action": "CREATE", "external_member_ids": member_ids}) + + def remove_members(self, members): + """ + Removes members from this asset group. + + Required Permissions: + group-management(DELETE) + + Args: + members (int, Device, or list): The members to be removed from the group. This may be an integer device ID, + a ``Device`` object, or a list of either integers or ``Device`` objects. + """ + member_ids = [] + if isinstance(members, int): + member_ids = [str(members)] + elif isinstance(members, Device): + member_ids = [str(members.id)] + else: + for m in members: + if isinstance(m, int): + member_ids.append(str(m)) + elif isinstance(m, Device): + member_ids.append(str(m.id)) + if len(member_ids) > 0: + self._cb.post_object(self._build_api_request_uri() + "/members", + {"action": "REMOVE", "external_member_ids": member_ids}) + + def get_statistics(self): + """ + For this group, return statistics about its group membership. + + The statistics include how many of the group's members belong to other groups, and how many members + belong to groups without policy association. + + See + `this page `_ + for more details on the structure of the return value from this method. + + Required Permissions: + group-management(READ) + + Returns: + dict: A dict with two elements. The "intersections" element contains elements detailing which groups share + members with this group, and which members they are. The "unassigned_properties" element contains + elements showing which members belong to groups without policy association. + """ # noqa: E501 W505 + return self._cb.get_object(self._build_api_request_uri() + "/membership_summary") + + def preview_add_members(self, devices): + """ + Previews changes to the effective policies for devices which result from adding them to this asset group. 
+ + Required Permissions: + org.policies (READ) + + Args: + devices (list): The devices which will be added to this asset group. Each entry in this list is either + an integer device ID or a ``Device`` object. + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. + """ + return AssetGroup.preview_add_members_to_groups(self._cb, devices, [self]) + + def preview_remove_members(self, devices): + """ + Previews changes to the effective policies for devices which result from removing them from this asset group. + + Required Permissions: + org.policies (READ) + + Args: + devices (list): The devices which will be removed from this asset group. Each entry in this list is either + an integer device ID or a ``Device`` object. + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. + """ + return AssetGroup.preview_remove_members_from_groups(self._cb, devices, [self]) + + def preview_save(self): + """ + Previews changes to the effective policies for devices which result from unsaved changes to this asset group. + + Required Permissions: + org.policies (READ) + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. + """ + policy_id = None + query = None + remove_policy_id = False + remove_query = False + if "policy_id" in self._dirty_attributes: + if self._info["policy_id"] is None: + remove_policy_id = True + else: + policy_id = self._info["policy_id"] + if "query" in self._dirty_attributes: + if self._info["query"] is None: + remove_query = True + else: + query = self._info["query"] + return AssetGroup.preview_update_asset_groups(self._cb, [self], policy_id=policy_id, query=query, + remove_policy_id=remove_policy_id, remove_query=remove_query) + + def preview_delete(self): + """ + Previews changes to the effective policies for devices which result from this asset group being deleted. + + Required Permissions: + org.policies (READ) + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. + """ + return AssetGroup.preview_delete_asset_groups(self._cb, [self]) + + @classmethod + def create_group(cls, cb, name, description=None, policy_id=None, query=None): + """ + Create a new asset group. + + Required Permissions: + group-management(CREATE) + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + name (str): Name for the new asset group. + description (str): Description for the new asset group. Default is ``None``. + policy_id (int): ID of the policy to be associated with this asset group. Default is ``None``. + query (str): Query string to be used to dynamically populate this group. Default is ``None``, + which means devices _must_ be manually assigned to the group. + + Returns: + AssetGroup: The new asset group. 
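+
+        Example:
+            >>> # An illustrative sketch; the group name, description, and query below are placeholders.
+            >>> group = AssetGroup.create_group(cb, "Windows Group", description="Example group",
+            ...                                 query="os.equals:WINDOWS")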
+ """ + group_data = {"name": name, "member_type": "DEVICE"} + if description: + group_data["description"] = description + if policy_id: + group_data["policy_id"] = policy_id + if query: + group_data["query"] = query + group = AssetGroup(cb, None, group_data, False, True) + group.save() + return group + + @classmethod + def get_all_groups(cls, cb): + """ + Retrieve all asset groups in the organization. + + Required Permissions: + group-management(READ) + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + + Returns: + list[AssetGroup]: List of ``AssetGroup`` objects corresponding to the asset groups in the organization. + """ + return_data = cb.get_object(AssetGroup.urlobject.format(cb.credentials.org_key)) + return [AssetGroup(cb, v['id'], v) for v in return_data['results']] + + @classmethod + def _collect_groups(cls, groups): + """ + Collects a list of asset groups as IDs. + + Args: + groups (list): A list of items, each of which may be either string group IDs or ``AssetGroup`` objects. + + Returns: + list[str]: A list of string group IDs. + """ + group_list = [] + for group in groups: + if isinstance(group, AssetGroup): + group_list.append(group.id) + elif isinstance(group, str): + group_list.append(group) + return group_list + + @classmethod + def _preview_asset_group_member_change(cls, cb, action, members, groups): + """ + Internal function which handles asset group change previews. + + Required Permissions: + org.policies (READ) + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + action (str): The action to be passed to the server. + members (list): A list of either integer device IDs or ``Device`` objects. + groups (list): A list of either string asset group IDs or ``AssetGroup`` objects. + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. + """ + ret = cb.post_object(f"/policy-assignment/v1/orgs/{cb.credentials.org_key}/asset-groups/preview", + {"action": action, "asset_ids": Device._collect_devices(members), + "asset_group_ids": AssetGroup._collect_groups(groups)}) + return [DevicePolicyChangePreview(cb, p) for p in ret.json()["preview"]] + + @classmethod + def preview_add_members_to_groups(cls, cb, members, groups): + """ + Previews changes to the effective policies for devices which result from adding them to asset groups. + + Required Permissions: + org.policies (READ) + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + members (list): The devices which will be added to new asset groups. Each entry in this list is either + an integer device ID or a ``Device`` object. + groups (list): The asset groups to which the devices will be added. Each entry in this list is either + a string asset group ID or an ``AssetGroup`` object. + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. + """ + return cls._preview_asset_group_member_change(cb, "ADD_MEMBERS", members, groups) + + @classmethod + def preview_remove_members_from_groups(cls, cb, members, groups): + """ + Previews changes to the effective policies for devices which result from removing them from asset groups. + + Required Permissions: + org.policies (READ) + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. 
+ members (list): The devices which will be removed from asset groups. Each entry in this list is either + an integer device ID or a ``Device`` object. + groups (list): The asset groups from which the devices will be removed. Each entry in this list is either + a string asset group ID or an ``AssetGroup`` object. + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. + """ + return cls._preview_asset_group_member_change(cb, "REMOVE_MEMBERS", members, groups) + + @classmethod + def preview_create_asset_group(cls, cb, policy_id, query): + """ + Previews changes to the effective policies for devices which result from creating a new asset group. + + Required Permissions: + org.policies (READ) + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + policy_id (int): The ID of the policy to be added to the new asset group. + query (str): The query string to be used for the new asset group. + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. + """ + ret = cb.post_object(f"/policy-assignment/v1/orgs/{cb.credentials.org_key}/asset-groups/preview", + {"action": "ASSET_GROUPS_CREATE", "asset_group_query": query, "policy_id": policy_id}) + return [DevicePolicyChangePreview(cb, p) for p in ret.json()["preview"]] + + @classmethod + def preview_update_asset_groups(cls, cb, groups, policy_id=None, query=None, remove_policy_id=False, + remove_query=False): + """ + Previews changes to the effective policies for devices which result from changes to asset groups. + + Required Permissions: + org.policies (READ) + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + groups (list): The asset groups which will be updated. Each entry in this list is either + a string asset group ID or an ``AssetGroup`` object. + policy_id (int): If this is not ``None`` and ``remove_policy_id`` is ``False``, contains the ID of the + policy to be assigned to the specified groups. Default is ``None``. + query (str): If this is not ``None`` and ``remove_query`` is ``False``, contains the new query string + to be assigned to the specified groups. Default is ``None``. + remove_policy_id (bool): If this is ``True``, indicates that the specified groups will have their policy + ID removed entirely. Default is ``False``. + remove_query (bool): If this is ``True``, indicates that the specified groups will have their query + strings removed entirely. Default is ``False``. + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. 
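+
+        Example:
+            A minimal usage sketch of previewing a policy change on an existing group; ``api`` is assumed to be
+            a configured ``CBCloudAPI`` instance, and the policy ID is a placeholder:
+
+            >>> group = api.select(AssetGroup).first()
+            >>> previews = AssetGroup.preview_update_asset_groups(api, [group], policy_id=98765)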
+        """
+        if not (remove_policy_id or remove_query) and policy_id is None and query is None:
+            return []
+        body = {"action": "ASSET_GROUPS_UPDATE", "asset_group_ids": AssetGroup._collect_groups(groups)}
+        if remove_policy_id:
+            body["policy_id"] = None
+        elif policy_id is not None:
+            body["policy_id"] = policy_id
+        if remove_query:
+            body["asset_group_query"] = None
+        elif query is not None:
+            body["asset_group_query"] = query
+        ret = cb.post_object(f"/policy-assignment/v1/orgs/{cb.credentials.org_key}/asset-groups/preview", body)
+        return [DevicePolicyChangePreview(cb, p) for p in ret.json()["preview"]]
+
+    @classmethod
+    def preview_delete_asset_groups(cls, cb, groups):
+        """
+        Previews changes to the effective policies for devices which result from deleting asset groups.
+
+        Required Permissions:
+            org.policies (READ)
+
+        Args:
+            cb (BaseAPI): Reference to API object used to communicate with the server.
+            groups (list): The asset groups which will be deleted. Each entry in this list is either
+                a string asset group ID or an ``AssetGroup`` object.
+
+        Returns:
+            list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets
+                that change which policy is effective as the result of this operation.
+        """
+        ret = cb.post_object(f"/policy-assignment/v1/orgs/{cb.credentials.org_key}/asset-groups/preview",
+                             {"action": "ASSET_GROUPS_DELETE", "asset_group_ids": AssetGroup._collect_groups(groups)})
+        return [DevicePolicyChangePreview(cb, p) for p in ret.json()["preview"]]
+
+
+class AssetGroupQuery(BaseQuery, QueryBuilderSupportMixin, IterableQueryMixin, CriteriaBuilderSupportMixin,
+                      AsyncQueryMixin):
+    """
+    Query object that is used to locate ``AssetGroup`` objects.
+
+    The ``AssetGroupQuery`` is constructed via SDK functions like the ``select()`` method on ``CBCloudAPI``.
+    The user would then add a query and/or criteria to it before iterating over the results.
+
+    The following criteria are supported on ``AssetGroupQuery`` via the standard ``add_criteria()`` method:
+
+    * ``discovered: bool`` - Whether the asset group has been discovered or not.
+    * ``name: str`` - The asset group name to be matched.
+    * ``policy_id: int`` - The policy ID to be matched, expressed as an integer.
+    * ``group_id: str`` - The asset group ID to be matched, expressed as a GUID.
+    """
+    def __init__(self, doc_class, cb):
+        """
+        Initialize the ``AssetGroupQuery``.
+
+        Args:
+            doc_class (class): The model class that will be returned by this query.
+            cb (BaseAPI): Reference to API object used to communicate with the server.
+        """
+        self._doc_class = doc_class
+        self._cb = cb
+        super(AssetGroupQuery, self).__init__()
+
+        self._query_builder = QueryBuilder()
+        self._criteria = {}
+        self._sortcriteria = {}
+        self._default_rows = 100
+        self._count_valid = False
+        self._total_results = 0
+
+    def set_rows(self, rows):
+        """
+        Sets the number of query rows to fetch in each batch from the server.
+
+        Args:
+            rows (int): The number of rows to be fetched from the server at a time. Default is 100.
+
+        Returns:
+            AssetGroupQuery: This instance.
+        """
+        self._default_rows = rows
+        return self
+
+    def sort_by(self, key, direction="ASC"):
+        """
+        Sets the sorting behavior on a query's results.
+
+        Example:
+            >>> cb.select(AssetGroup).sort_by("name")
+
+        Args:
+            key (str): The key in the schema to sort by.
+            direction (str): The sort order, either "ASC" or "DESC".
+
+        Returns:
+            AssetGroupQuery: This instance.
+ """ + if direction not in DeviceSearchQuery.VALID_DIRECTIONS: + raise ApiError("invalid sort direction specified") + self._sortcriteria = {"field": key, "order": direction} + return self + + def _build_request(self, from_row, max_rows, add_sort=True): + """ + Creates the request body for an API call. + + Args: + from_row (int): The row to start the query at. + max_rows (int): The maximum number of rows to be returned. + add_sort (bool): If ``True`` (default), the sort criteria will be added as part of the request. + + Returns: + dict: The complete request body. + """ + request = {"rows": self._default_rows} + if len(self._criteria) > 0: + request["criteria"] = self._criteria + query = self._query_builder._collapse() + if query: + request["query"] = query + if from_row >= 0: + request["start"] = from_row + if max_rows >= 0: + request["rows"] = max_rows + if add_sort and self._sortcriteria != {}: + request["sort"] = [self._sortcriteria] + return request + + def _build_url(self, tail_end): + """ + Creates the URL to be used for an API call. + + Args: + tail_end (str): String to be appended to the end of the generated URL. + + Returns: + str: The complete URL. + """ + url = self._doc_class.urlobject.format(self._cb.credentials.org_key) + tail_end + return url + + def _count(self): + """Returns the number of results from the run of this query.""" + if self._count_valid: + return self._total_results + + url = self._build_url("/_search") + request = self._build_request(0, -1) + resp = self._cb.post_object(url, body=request) + result = resp.json() + + self._total_results = result["num_found"] + self._count_valid = True + + return self._total_results + + def _perform_query(self, from_row=0, max_rows=-1): + """ + Performs the query and returns the results of the query in an iterable fashion. + + Required Permissions: + group-management(READ) + + Args: + from_row (int): The row to start the query at (default 0). + max_rows (int): The maximum number of rows to be returned (default -1, meaning "all"). + + Returns: + Iterable: The iterated query. + """ + url = self._build_url("/_search") + current = from_row + numrows = 0 + still_querying = True + while still_querying: + request = self._build_request(current, max_rows) + resp = self._cb.post_object(url, body=request) + result = resp.json() + + self._total_results = result["num_found"] + self._count_valid = True + + results = result.get("results", []) + for item in results: + yield self._doc_class(self._cb, item["id"], item, False, True) + current += 1 + numrows += 1 + + if max_rows > 0 and numrows == max_rows: + still_querying = False + break + + from_row = current + if current >= self._total_results: + still_querying = False + break + + def _run_async_query(self, context): + """ + Executed in the background to run an asynchronous query. + + Required Permissions: + group-management(READ) + + Args: + context (object): Not used; always ``None``. + + Returns: + list[AssetGroup]: Result of the async query, as a list of ``AssetGroup`` objects. 
+ """ + url = self._build_url("/_search") + request = self._build_request(0, -1) + resp = self._cb.post_object(url, body=request) + return_data = resp.json()["results"] + output = [AssetGroup(self._cb, item['id'], item, False, True) for item in return_data] + self._total_results = len(output) + self._count_valid = True + return output diff --git a/src/cbc_sdk/platform/devices.py b/src/cbc_sdk/platform/devices.py index 9737bf2b3..30d62d823 100644 --- a/src/cbc_sdk/platform/devices.py +++ b/src/cbc_sdk/platform/devices.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # ******************************************************* -# Copyright (c) VMware, Inc. 2020-2023. All Rights Reserved. +# Copyright (c) VMware, Inc. 2020-2024. All Rights Reserved. # SPDX-License-Identifier: MIT # ******************************************************* # * @@ -29,13 +29,18 @@ from cbc_sdk.errors import ApiError, ServerError, NonQueryableModel from cbc_sdk.platform import PlatformModel +from cbc_sdk.platform.jobs import Job from cbc_sdk.platform.vulnerability_assessment import Vulnerability, VulnerabilityQuery from cbc_sdk.base import (UnrefreshableModel, BaseQuery, QueryBuilder, QueryBuilderSupportMixin, CriteriaBuilderSupportMixin, IterableQueryMixin, AsyncQueryMixin) +from cbc_sdk.platform.previewer import DevicePolicyChangePreview from cbc_sdk.workload import NSXRemediationJob +import logging import time +log = logging.getLogger(__name__) + """"Device Models""" @@ -52,6 +57,9 @@ class Device(PlatformModel): primary_key = "id" swagger_meta_file = "platform/models/device.yaml" + """The valid values for the 'filter' parameter to get_asset_groups_for_devices().""" + VALID_ASSETGROUP_FILTERS = ("ALL", "DYNAMIC", "MANUAL") + def __init__(self, cb, model_unique_id, initial_data=None): """ Initialize the ``Device`` object. @@ -315,6 +323,217 @@ def nsx_remediation(self, tag, set_tag=True): return None # clearing tag is a no-op in this case return NSXRemediationJob.start_request(self._cb, self.id, tag, set_tag) + def get_asset_group_ids(self, membership="ALL"): + """ + Finds the list of asset group IDs that this device is a member of. + + Args: + membership (str): Can restrict the types of group membership returned by this method. Values are "ALL" + to return all groups, "DYNAMIC" to return only groups that each member belongs to via the + asset group query, or "MANUAL" to return only groups that the members were manually + added to. Default is "ALL". + + Returns: + list[str]: A list of asset group IDs this device belongs to. + """ + if membership not in Device.VALID_ASSETGROUP_FILTERS: + raise ApiError(f"Invalid filter value: {membership}") + if membership == "ALL": + return [g['id'] for g in self._info['asset_group']] + elif membership == "MANUAL": + return [g['id'] for g in self._info['asset_group'] if g['membership_type'] == 'MANUAL'] + elif membership == "DYNAMIC": + return [g['id'] for g in self._info['asset_group'] if g['membership_type'] == 'DYNAMIC'] + + def get_asset_groups(self, membership="ALL"): + """ + Finds the list of asset groups that this device is a member of. + + Required Permissions: + group-management(READ) + + Args: + membership (str): Can restrict the types of group membership returned by this method. Values are "ALL" + to return all groups, "DYNAMIC" to return only groups that each member belongs to via the + asset group query, or "MANUAL" to return only groups that the members were manually + added to. Default is "ALL". + + Returns: + list[AssetGroup]: A list of asset groups this device belongs to. 
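+
+        Example:
+            A minimal usage sketch of listing the groups a device was manually added to; the profile name and
+            device ID are placeholders:
+
+            >>> from cbc_sdk import CBCloudAPI
+            >>> from cbc_sdk.platform import Device
+            >>> api = CBCloudAPI(profile="sample")
+            >>> device = api.select(Device, 12345)
+            >>> for group in device.get_asset_groups(membership="MANUAL"):
+            ...     print(group.name)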
+ """ + return [self._cb.select("AssetGroup", v) for v in self.get_asset_group_ids(membership)] + + def add_to_groups_by_id(self, group_ids): + """ + Given a list of asset group IDs, adds this device to each one as a member. + + Args: + group_ids (list[str]): The list of group IDs to add this device to. + """ + actual_group_ids = set(group_ids).difference(self.get_asset_group_ids("MANUAL")) + for group_id in actual_group_ids: + url = f"/asset_groups/v1/orgs/{self._cb.credentials.org_key}/groups/{group_id}/members" + self._cb.post_object(url, {"action": "CREATE", "external_member_ids": [str(self._model_unique_id)]}) + if len(actual_group_ids) > 0: + self._refresh() + + def add_to_groups(self, groups): + """ + Given a list of asset groups, adds this device to each one as a member. + + Args: + groups (list[AssetGroup]): The list of groups to add this device to. + """ + existing_ids = self.get_asset_group_ids("MANUAL") + actual_groups = [g for g in groups if g.id not in existing_ids] + for group in actual_groups: + group.add_members(self) + if len(actual_groups) > 0: + self._refresh() + + def remove_from_groups_by_id(self, group_ids): + """ + Given a list of asset group IDs, removes this device from each one as a member. + + Args: + group_ids (list[str]): The list of group IDs to remove this device from. + """ + actual_group_ids = set(group_ids).intersection(self.get_asset_group_ids("MANUAL")) + for group_id in actual_group_ids: + url = f"/asset_groups/v1/orgs/{self._cb.credentials.org_key}/groups/{group_id}/members" + self._cb.post_object(url, {"action": "REMOVE", "external_member_ids": [str(self._model_unique_id)]}) + if len(actual_group_ids) > 0: + self._refresh() + + def remove_from_groups(self, groups): + """ + Given a list of asset groups, removes this device from each one as a member. + + Args: + groups (list[AssetGroup]): The list of groups to remove this device from. + """ + existing_ids = self.get_asset_group_ids("MANUAL") + actual_groups = [g for g in groups if g.id in existing_ids] + for group in actual_groups: + group.remove_members(self) + if len(actual_groups) > 0: + self._refresh() + + def preview_remove_policy_override(self): + """ + Previews changes to this device's effective policy which result from removing its policy override. + + Required Permissions: + org.policies (READ) + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. + """ + return Device.preview_remove_policy_override_for_devices(self._cb, [self]) + + @classmethod + def _collect_devices(cls, devices): + """ + Collects a list of devices as IDs. + + Args: + devices (list): A list of items, each of which may be either integer device IDs or ``Device`` objects. + + Returns: + list[int]: A list of integer device IDs. + """ + device_ids = [] + for d in devices: + if isinstance(d, Device): + device_ids.append(d.id) + elif isinstance(d, int): + device_ids.append(d) + return device_ids + + @classmethod + def get_asset_groups_for_devices(cls, cb, devices, membership="ALL"): + """ + Given a list of devices, returns lists of asset groups that they are members of. + + Required Permissions: + group-management(READ) + + Args: + cls (class): Class associated with the ``Device`` object. + cb (BaseAPI): Reference to API object used to communicate with the server. + devices (int, Device, or list): The devices to find the group membership of. 
This may be an integer + device ID, a ``Device`` object, or a list of either integers or + ``Device`` objects. + membership (str): Can restrict the types of group membership returned by this method. Values are "ALL" + to return all groups, "DYNAMIC" to return only groups that each member belongs to via the + asset group query, or "MANUAL" to return only groups that the members were manually + added to. Default is "ALL". + + Returns: + dict: A dict containing member IDs as keys, and lists of group IDs as values. + """ + if membership not in Device.VALID_ASSETGROUP_FILTERS: + raise ApiError(f"Invalid filter value: {membership}") + if isinstance(devices, int): + device_ids = [str(devices)] + elif isinstance(devices, Device): + device_ids = [str(devices.id)] + else: + device_ids = [str(v) for v in Device._collect_devices(devices)] + if len(device_ids) > 0: + postdata = {"external_member_ids": device_ids} + if membership != "ALL": + postdata["membership_type"] = [membership] + rc = cb.post_object(f"/asset_groups/v1/orgs/{cb.credentials.org_key}/members", postdata) + return {int(k): v for k, v in rc.json().items()} + else: + return {} + + @classmethod + def preview_add_policy_override_for_devices(cls, cb, policy_id, devices): + """ + Previews changes to the effective policies for devices which result from setting a policy override on them. + + Required Permissions: + org.policies (READ) + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + policy_id (int): The ID of the policy to be added to the devices as an override. + devices (list): The devices which will have their policies overridden. Each entry in this list is either + an integer device ID or a ``Device`` object. + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. + """ + ret = cb.post_object(f"/policy-assignment/v1/orgs/{cb.credentials.org_key}/asset-groups/preview", + {"action": "ADD_POLICY_OVERRIDE", "asset_ids": Device._collect_devices(devices), + "policy_id": policy_id}) + return [DevicePolicyChangePreview(cb, p) for p in ret.json()["preview"]] + + @classmethod + def preview_remove_policy_override_for_devices(cls, cb, devices): + """ + Previews changes to the effective policies for devices which result from removing their policy override. + + Required Permissions: + org.policies (READ) + + Args: + cb (BaseAPI): Reference to API object used to communicate with the server. + devices (list): The devices which will have their policy overrides removed. Each entry in this list + is either an integer device ID or a ``Device`` object. + + Returns: + list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets + that change which policy is effective as the result of this operation. 
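+
+        Example:
+            A minimal usage sketch; ``api`` is assumed to be a configured ``CBCloudAPI`` instance and the device
+            ID is a placeholder:
+
+            >>> previews = Device.preview_remove_policy_override_for_devices(api, [12345])
+            >>> for preview in previews:
+            ...     print(preview.current_policy_id, preview.new_policy_id, preview.asset_count)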
+        """
+        ret = cb.post_object(f"/policy-assignment/v1/orgs/{cb.credentials.org_key}/asset-groups/preview",
+                             {"action": "REMOVE_POLICY_OVERRIDE", "asset_ids": Device._collect_devices(devices)})
+        return [DevicePolicyChangePreview(cb, p) for p in ret.json()["preview"]]
+
 
 class DeviceFacet(UnrefreshableModel):
     """
@@ -395,6 +614,8 @@ def query_devices(self):
             query.set_auto_scaling_group_name([self.id])
         elif self._outer.field == "virtual_private_cloud_id":
             query.set_virtual_private_cloud_id([self.id])
+        elif self._outer.field == "deployment_type":
+            query.set_deployment_type([self.id])
         return query
 
     @classmethod
@@ -447,7 +668,7 @@ class DeviceSearchQuery(BaseQuery, QueryBuilderSupportMixin, CriteriaBuilderSupp
     VALID_PRIORITIES = ["LOW", "MEDIUM", "HIGH", "MISSION_CRITICAL"]
     VALID_DEPLOYMENT_TYPES = ["ENDPOINT", "WORKLOAD", "VDI", "AWS", "AZURE", "GCP"]
     VALID_FACET_FIELDS = ["policy_id", "status", "os", "ad_group_id", "cloud_provider_account_id",
-                          "auto_scaling_group_name", "virtual_private_cloud_id"]
+                          "auto_scaling_group_name", "virtual_private_cloud_id", "deployment_type"]
 
     def __init__(self, doc_class, cb):
         """
@@ -467,6 +688,9 @@ def __init__(self, doc_class, cb):
         self._time_filter = {}
         self._exclusions = {}
         self._sortcriteria = {}
+        self._search_after = None
+        self.num_remaining = None
+        self.num_found = None
         self.max_rows = -1
 
     def _update_exclusions(self, key, newlist):
@@ -893,6 +1117,9 @@ def download(self):
         """
         Uses the query parameters that have been set to download all device listings in CSV format.
 
+        Deprecated:
+            Use DeviceSearchQuery.export for increased export capabilities and limits.
+
         Example:
             >>> cb.select(Device).set_status(["ALL"]).download()
 
@@ -905,6 +1132,7 @@ def download(self):
         Raises:
             ApiError: If status values have not been set before calling this function.
         """
+        log.warning("DeviceSearchQuery.download is deprecated, use DeviceSearchQuery.export instead")
         tmp = self._criteria.get("status", [])
         if not tmp:
             raise ApiError("at least one status must be specified to download")
@@ -927,6 +1155,85 @@ def download(self):
         url = self._build_url("/_search/download")
         return self._cb.get_raw_data(url, query_params)
 
+    def export(self):
+        """
+        Starts the process of exporting Devices from the organization in CSV format.
+
+        Example:
+            >>> cb.select(Device).set_status(["ACTIVE"]).export()
+
+        Required Permissions:
+            device(READ)
+
+        Returns:
+            Job: The asynchronous job that will provide the export output when the server has prepared it.
+        """
+        request = self._build_request(0, -1)
+        request["format"] = "CSV"
+        url = self._build_url("/_export")
+        resp = self._cb.post_object(url, body=request)
+        result = resp.json()
+        return Job(self._cb, result["id"], result)
+
+    def scroll(self, rows=10000):
+        """
+        Iteratively paginate all Devices beyond the 10k max search limit.
+
+        To fetch the next set of Devices, repeatedly call the scroll function until
+        `DeviceSearchQuery.num_remaining == 0` or no results are returned.
+ + Example: + >>> cb.select(Device).set_status(["ACTIVE"]).scroll(100) + + Required Permissions: + device(READ) + + Args: + rows (int): The number of rows to fetch + + Returns: + list[Device]: The list of results + """ + if self.num_remaining == 0: + return [] + elif rows > 10000: + rows = 10000 + + url = self._build_url("/_scroll") + + # Sort by last_contact_time enforced + self._sort = {} + + request = self._build_request(0, rows) + + if self._search_after is not None: + request["search_after"] = self._search_after + + resp = self._cb.post_object(url, body=request) + resp_json = resp.json() + + # Calculate num_remaining until backend provides in response + if self._search_after is None: + self.num_remaining = resp_json["num_found"] - len(resp_json["results"]) + self.num_found = resp_json["num_found"] + elif self.num_found != resp_json["num_found"]: + diff = resp_json["num_found"] - self.num_found + self.num_remaining = self.num_remaining - len(resp_json["results"]) + diff + else: + self.num_remaining = self.num_remaining - len(resp_json["results"]) + + if self.num_remaining < 0: + self.num_remaining = 0 + + # Capture latest state + self._search_after = resp_json["search_after"] + + results = [] + for item in resp_json["results"]: + results.append(self._doc_class(self._cb, item["id"], item)) + + return results + def _bulk_device_action(self, action_type, options=None): """ Perform a bulk action on all devices matching the current search criteria. diff --git a/src/cbc_sdk/platform/models/asset_group.yaml b/src/cbc_sdk/platform/models/asset_group.yaml new file mode 100644 index 000000000..8fbf6ea64 --- /dev/null +++ b/src/cbc_sdk/platform/models/asset_group.yaml @@ -0,0 +1,45 @@ +type: object +properties: + id: + type: string + description: The asset group identifier. + name: + type: string + description: The asset group name. + description: + type: string + description: The asset group description. + org_key: + type: string + description: The organization key of the owning organization. + status: + type: string + description: Status of the group. + member_type: + type: string + description: The type of objects this asset group contains. + enum: + - DEVICE + discovered: + type: boolean + description: Whether this group has been discovered. + create_time: + type: string + format: date-time + description: Date and time the group was created. + update_time: + type: string + format: date-time + description: Date and time the group was last updated. + member_count: + type: integer + description: Number of members in this group. + policy_id: + type: integer + description: ID of the policy associated with this group. + policy_name: + type: string + description: Name of the policy associated with this group. + query: + type: string + description: Search query used to determine which assets are included in the group membership. diff --git a/src/cbc_sdk/platform/models/device.yaml b/src/cbc_sdk/platform/models/device.yaml index 3a3ea1454..41faeb56c 100644 --- a/src/cbc_sdk/platform/models/device.yaml +++ b/src/cbc_sdk/platform/models/device.yaml @@ -10,6 +10,24 @@ properties: type: integer format: int64 description: Device's AD group + asset_group: + type: array + description: The asset groups that this device is a member of. + items: + type: object + properties: + id: + type: string + description: The ID of the asset group the device belongs to. + name: + type: string + description: The name of the asset group the device belongs to. 
+ membership_type: + type: string + description: The type of membership this device has in the asset group. + enum: + - DYNAMIC + - MANUAL av_ave_version: type: string description: AVE version (part of AV Version) diff --git a/src/cbc_sdk/platform/models/grouped_alert.yaml b/src/cbc_sdk/platform/models/grouped_alert.yaml new file mode 100644 index 000000000..ed7f0f689 --- /dev/null +++ b/src/cbc_sdk/platform/models/grouped_alert.yaml @@ -0,0 +1,50 @@ +type: object +properties: + count: + type: integer + description: Count of individual alerts that are a part of the group + determination_values: + type: object + description: Map of determination (TRUE_POSITIVE, FALSE_POSITIVE, NONE) to the number of individual alerts in the + group with that determination. Determinations with no alerts are omitted. + ml_classification_final_verdicts: + type: object + description: Map of ML classification (ANOMALOUS, NOT_ANOMALOUS, NO_PREDICTION) to the number of individual alerts + in the group with that classification. Classifications with no alerts are omitted. + workflow_states: + type: object + description: Map of workflow state (OPEN, IN_PROGRESS, CLOSED) to the number of individual alerts in the group in + that state. States with no alerts are omitted. + device_count: + type: integer + description: Count of unique devices where this alert can be found + first_alert_timestamp: + type: string + format: date-time + description: Timestamp of the first (oldest) alert in the group + highest_severity: + type: integer + description: Highest severity score of all alerts in the group + last_alert_timestamp: + type: string + format: date-time + description: Timestamp of the last (newest) alert in the group + most_recent_alert: + type: object + description: The most recent alert in the group. Follows the Alerts Schema and returns an :py:class:`Alert` object. + Specific fields vary between alert instances + policy_applied: + type: string + description: APPLIED, when any of the alerts in the group had actions blocked by the sensor due to a policy. + NOT_APPLIED otherwise. + tags: + type: array + description: List of tags that have been applied to the threat ID + items: + type: string + threat_notes_present: + type: boolean + description: Whether there are threat-level notes available on this threat ID + workload_count: + type: integer + description: Count of unique Kubernetes workloads where this alert can be found diff --git a/src/cbc_sdk/platform/models/policy.yaml b/src/cbc_sdk/platform/models/policy.yaml index 0e5ea3dc2..c22a0ba87 100644 --- a/src/cbc_sdk/platform/models/policy.yaml +++ b/src/cbc_sdk/platform/models/policy.yaml @@ -19,6 +19,7 @@ properties: - MISSION_CRITICAL position: type: integer + description: Relative priority of this policy within the organization. Lower values indicate higher priority. 
is_system: type: boolean description: Indicates that the policy was created by VMware diff --git a/src/cbc_sdk/platform/observations.py b/src/cbc_sdk/platform/observations.py index 2909d7f45..7e455f860 100644 --- a/src/cbc_sdk/platform/observations.py +++ b/src/cbc_sdk/platform/observations.py @@ -30,14 +30,7 @@ class Observation(NewBaseModel): validation_url = "/api/investigate/v2/orgs/{}/observations/search_validation" swagger_meta_file = "platform/models/observation.yaml" - def __init__( - self, - cb, - model_unique_id=None, - initial_data=None, - force_init=False, - full_doc=False, - ): + def __init__(self, cb, model_unique_id=None, initial_data=None, force_init=False, full_doc=False): """ Initialize the Observation object. @@ -51,7 +44,7 @@ def __init__( force_init (bool): True to force object initialization. full_doc (bool): False to mark the object as not fully initialized. """ - self._details_timeout = 0 + self._details_timeout = cb.credentials.default_timeout self._info = None if model_unique_id is not None and initial_data is None: observations_future = ( @@ -101,7 +94,8 @@ def get_details(self, timeout=0, async_mode=False): """Requests detailed results. Args: - timeout (int): Observations details request timeout in milliseconds. + timeout (int): Observations details request timeout in milliseconds. This may never be greater than the + configured default timeout. If this value is 0, the configured default timeout is used. async_mode (bool): True to request details in an asynchronous manner. Returns: @@ -118,7 +112,10 @@ def get_details(self, timeout=0, async_mode=False): >>> observations = api.select(Observation).where(process_pid=2000) >>> observations[0].get_details() """ - self._details_timeout = timeout + if timeout <= 0: + self._details_timeout = self._cb.credentials.default_timeout + else: + self._details_timeout = min(timeout, self._cb.credentials.default_timeout) if not self.observation_id: raise ApiError( "Trying to get observation details on an invalid observation_id" @@ -150,7 +147,8 @@ def _helper_get_details(cb, alert_id=None, observation_ids=None, bulk=False, tim alert_id (str): An alert id to fetch associated observations observation_ids (list): A list of observation ids to fetch bulk (bool): Whether it is a bulk request - timeout (int): Observations details request timeout in milliseconds. + timeout (int): Observations details request timeout in milliseconds. This may never be greater than + the configured default timeout. If this value is 0, the configured default timeout is used. 
Returns: Observation or list(Observation): if it is a bulk operation a list, otherwise Observation @@ -158,6 +156,8 @@ def _helper_get_details(cb, alert_id=None, observation_ids=None, bulk=False, tim Raises: ApiError: if cb is not instance of CBCloudAPI """ + if timeout <= 0 or timeout > cb.credentials.default_timeout: + timeout = cb.credentials.default_timeout if cb.__class__.__name__ != "CBCloudAPI": raise ApiError("cb argument should be instance of CBCloudAPI.") if (alert_id and observation_ids) or not (alert_id or observation_ids): @@ -186,7 +186,7 @@ def _helper_get_details(cb, alert_id=None, observation_ids=None, bulk=False, tim time.sleep(0.5) continue if completed < contacted: - if timeout != 0 and (time.time() * 1000) - submit_time > timeout: + if (time.time() * 1000) - submit_time > timeout: timed_out = True break else: @@ -226,6 +226,20 @@ def get_network_threat_metadata(self): except AttributeError: raise ApiError("No available network threat metadata.") + def deobfuscate_cmdline(self): + """ + Deobfuscates the command line of the process pointed to by the observation and returns the deobfuscated result. + + Required Permissions: + script.deobfuscation(EXECUTE) + + Returns: + dict: A dict containing information about the obfuscated command line, including the deobfuscated result. + """ + body = {"input": self.process_cmdline[0]} + result = self._cb.post_object(f"/tau/v2/orgs/{self._cb.credentials.org_key}/reveal", body) + return result.json() + @staticmethod def search_suggestions(cb, query, count=None): """ @@ -259,7 +273,8 @@ def bulk_get_details(cb, alert_id=None, observation_ids=None, timeout=0): cb (CBCloudAPI): A reference to the CBCloudAPI object. alert_id (str): An alert id to fetch associated observations observation_ids (list): A list of observation ids to fetch - timeout (int): Observations details request timeout in milliseconds. + timeout (int): Observations details request timeout in milliseconds. This may never be greater than + the configured default timeout. If this value is 0, the configured default timeout is used. Returns: list: list of Observations @@ -394,7 +409,7 @@ def __init__(self, doc_class, cb): super(ObservationQuery, self).__init__(doc_class, cb) self._default_args["rows"] = self._batch_size self._query_token = None - self._timeout = 0 + self._timeout = cb.credentials.default_timeout self._timed_out = False def or_(self, **kwargs): @@ -427,19 +442,23 @@ def set_rows(self, rows): return self def timeout(self, msecs): - """Sets the timeout on a observation query. + """ + Sets the timeout on a observation query. Arguments: - msecs (int): Timeout duration, in milliseconds. + msecs (int): Timeout duration, in milliseconds. This may never be greater than the configured default + timeout. If this value is 0, the configured default timeout is used. Returns: - Query (ObservationQuery): The Query object with new milliseconds - parameter. + Query (ObservationQuery): The Query object with new milliseconds parameter. 
Example: >>> cb.select(Observation).where(process_name="foo.exe").timeout(5000) """ - self._timeout = msecs + if msecs <= 0: + self._timeout = self._cb.credentials.default_timeout + else: + self._timeout = min(msecs, self._cb.credentials.default_timeout) return self def _submit(self): @@ -461,6 +480,7 @@ def _submit(self): def _still_querying(self): """Check whether there are still records to be collected.""" + assert self._timeout > 0 if not self._query_token: self._submit() @@ -478,7 +498,7 @@ def _still_querying(self): if contacted == 0: return True if completed < contacted: - if self._timeout != 0 and (time.time() * 1000) - self._submit_time > self._timeout: + if (time.time() * 1000) - self._submit_time > self._timeout: self._timed_out = True return False return True diff --git a/src/cbc_sdk/platform/policies.py b/src/cbc_sdk/platform/policies.py index cbbc308a1..d7a67e11a 100644 --- a/src/cbc_sdk/platform/policies.py +++ b/src/cbc_sdk/platform/policies.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # ******************************************************* -# Copyright (c) VMware, Inc. 2020-2023. All Rights Reserved. +# Copyright (c) VMware, Inc. 2020-2024. All Rights Reserved. # SPDX-License-Identifier: MIT # ******************************************************* # * @@ -16,8 +16,10 @@ import json from types import MappingProxyType from cbc_sdk.base import MutableBaseModel, BaseQuery, IterableQueryMixin, AsyncQueryMixin +from cbc_sdk.platform.devices import Device from cbc_sdk.platform.policy_ruleconfigs import (PolicyRuleConfig, CorePreventionRuleConfig, HostBasedFirewallRuleConfig, DataCollectionRuleConfig) +from cbc_sdk.platform.previewer import DevicePolicyChangePreview from cbc_sdk.errors import ApiError, ServerError, InvalidObjectError @@ -566,7 +568,7 @@ def build(self): new_policy["rule_configs"] = [copy.deepcopy(rcfg._info) for rcfg in self._new_rule_configs] return Policy(self._cb, None, new_policy, False, True) - def _subobject(self, name): + def _subobject(self, name): # pragma: no cover """ Returns the "subobject value" of the given attribute. @@ -1021,6 +1023,89 @@ def replace_rule_config(self, rule_config_id, new_rule_config): else: raise ApiError(f"rule configuration '{rule_config_id}' not found in policy") + def set_data_collection(self, parameter, value): + """ + Sets a data collection parameter value on any data collection rule configurations in the policy that have it. + + As a safety check, this method also validates that the type of the existing value of that parameter is the + same as the type of the new value we want to set for that parameter. + + Args: + parameter (str): The name of the parameter to set. + value (Any): The value of the parameter to set. + + Raises: + ApiError: If the parameter setting operation failed. 
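+
+        Example:
+            A minimal usage sketch using the ``enable_auth_events`` parameter; ``api`` is assumed to be a
+            configured ``CBCloudAPI`` instance and the policy ID is a placeholder:
+
+            >>> policy = api.select(Policy, 98765)
+            >>> policy.set_data_collection("enable_auth_events", True)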
+        """
+        rconf_blocks = [block for block in self.rule_configs
+                        if block['category'] == 'data_collection' and parameter in block['parameters']]
+        if len(rconf_blocks) > 0:
+            url = f"/policyservice/v1/orgs/{self._cb.credentials.org_key}/policies/{self.id}" \
+                  "/rule_configs/data_collection"
+            for rconf_block in rconf_blocks:
+                if type(rconf_block['parameters'][parameter]) is type(value):
+                    body = {"id": rconf_block['id'], "parameters": {parameter: value}}
+                    return_data = self._cb.put_object(url, body)
+                    fail_blocks = [block for block in return_data.json()['failed'] if block['id'] == rconf_block['id']]
+                    if len(fail_blocks) > 0:
+                        raise ApiError(fail_blocks[0]['message'])
+                    rconf_block['parameters'][parameter] = value
+
+    def set_xdr_collection(self, flag):
+        """
+        Sets XDR collection to be enabled or disabled on this policy.
+
+        Args:
+            flag (bool): ``True`` to enable XDR data collection, ``False`` to disable it.
+
+        Raises:
+            ApiError: If the parameter setting operation failed.
+        """
+        self.set_data_collection("enable_network_data_collection", flag)
+
+    def set_auth_event_collection(self, flag):
+        """
+        Sets auth event collection to be enabled or disabled on this policy.
+
+        Args:
+            flag (bool): ``True`` to enable auth event data collection, ``False`` to disable it.
+
+        Raises:
+            ApiError: If the parameter setting operation failed.
+        """
+        self.set_data_collection("enable_auth_events", flag)
+
+    def preview_rank_change(self, new_rank):
+        """
+        Previews a change in the ranking of this policy, and determines how this will affect asset groups.
+
+        Args:
+            new_rank (int): The new rank to give this policy. Ranks are limited to values in the range [1.._N_],
+                where _N_ is the total number of policies in the organization.
+
+        Returns:
+            list[DevicePolicyChangePreview]: A list of objects containing data previewing the policy changes.
+        """
+        return Policy.preview_policy_rank_changes(self._cb, [(self._model_unique_id, new_rank)])
+
+    def preview_add_policy_override(self, devices):
+        """
+        Previews changes to the effective policies for devices which result from setting this policy override on them.
+
+        Required Permissions:
+            org.policies (READ)
+
+        Args:
+            devices (list): The devices which will have their policies overridden. Each entry in this list is either
+                an integer device ID or a ``Device`` object.
+
+        Returns:
+            list[DevicePolicyChangePreview]: A list of ``DevicePolicyChangePreview`` objects representing the assets
+                that change which policy is effective as the result of this operation.
+        """
+        return Device.preview_add_policy_override_for_devices(self._cb, self._model_unique_id, devices)
+
     # --- BEGIN policy v1 compatibility methods ---
 
     @property
@@ -1183,6 +1268,44 @@ def create(cls, cb):
         """
         return Policy.PolicyBuilder(cb)
 
+    @classmethod
+    def preview_policy_rank_changes(cls, cb, changes_list):
+        """
+        Previews changes in the ranking of policies, and determines how this will affect asset groups.
+
+        Example::
+
+            >>> cb = CBCloudAPI(profile='sample')
+            >>> changes = Policy.preview_policy_rank_changes(cb, [(667251, 1)])
+            >>> # also: changes = Policy.preview_policy_rank_changes(cb, [{"id": 667251, "position": 1}])
+            >>> len(changes)
+            2
+            >>> changes[0].current_policy_id
+            660578
+            >>> changes[0].new_policy_id
+            667251
+
+        Args:
+            cb (BaseAPI): Reference to API object used to communicate with the server.
+            changes_list (list): The list of proposed changes in the ranking of policies.
Each change may be in
+                the form of a dict, in which case the "id" and "position" members are used to designate the policy ID
+                and the new position, or in the form of a list or tuple, in which case the first element specifies
+                the policy ID, and the second element specifies the new position. In all cases, "position" values are
+                limited to values in the range [1.._N_], where _N_ is the total number of policies in the organization.
+
+        Returns:
+            list[DevicePolicyChangePreview]: A list of objects containing data previewing the policy changes.
+        """
+        submit_list = []
+        for change in changes_list:
+            if isinstance(change, dict):
+                submit_list.append({"id": change["id"], "position": change["position"]})
+            elif isinstance(change, list) or isinstance(change, tuple):
+                submit_list.append({"id": change[0], "position": change[1]})
+        ret = cb.post_object(f"/policy-assignment/v1/orgs/{cb.credentials.org_key}/policies/preview",
+                             {"policies": submit_list})
+        return [DevicePolicyChangePreview(cb, p) for p in ret.json()["preview"]]
+
 
 class PolicyRule(MutableBaseModel):
     """
diff --git a/src/cbc_sdk/platform/previewer.py b/src/cbc_sdk/platform/previewer.py
new file mode 100644
index 000000000..a875c3678
--- /dev/null
+++ b/src/cbc_sdk/platform/previewer.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+
+# *******************************************************
+# Copyright (c) VMware, Inc. 2020-2024. All Rights Reserved.
+# SPDX-License-Identifier: MIT
+# *******************************************************
+# *
+# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
+# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
+# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
+# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
+# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
+
+"""This module contains the ``DevicePolicyChangePreview`` object.
+
+When methods on ``Device``, ``Policy``, or ``AssetGroup`` are called to "preview" changes in device policy,
+a list of these objects is returned. Each object represents a change in "effective" policy on one or more
+devices.
+"""
+
+
+class DevicePolicyChangePreview:
+    """
+    Contains data previewing a change in device policies.
+
+    Changes to policies may happen via asset group memberships, policy rank changes, device policy overrides,
+    or other causes.
+
+    Each one of these objects shows, for a given group of assets, the current policy that is the "effective policy"
+    for those assets, the new policy that will be the "effective policy" for those assets, the number of assets
+    affected, and which assets they are.
+    """
+    def __init__(self, cb, preview_data):
+        """
+        Creates a new instance of ``DevicePolicyChangePreview``.
+
+        Args:
+            cb (BaseAPI): Reference to API object used to communicate with the server.
+            preview_data (dict): Contains the preview data returned by the server API.
+ """ + self._cb = cb + self._preview_data = preview_data + + def __str__(self): # pragma: no cover + """Returns a string representation of the object.""" + lines = [f"{self.__class__.__name__} object, bound to {self._cb.session.server}.", '-' * 79, ''] + p = self._preview_data + lines.append(f"Current policy: #{p['current_policy']['id']} at rank {p['current_policy']['position']}") + lines.append(f" New policy: #{p['new_policy']['id']} at rank {p['new_policy']['position']}") + lines.append(f" Asset count: {p['asset_count']}") + lines.append(f" Asset query: {p['asset_query']}") + return "\n".join(lines) + + @property + def current_policy_id(self): + """The ID of the policy that is the current "effective" policy for a group of assets.""" + return self._preview_data['current_policy']['id'] + + @property + def current_policy(self): + """The ``Policy`` object that is the current "effective" policy for a group of assets.""" + return self._cb.select("Policy", self._preview_data['current_policy']['id']) + + @property + def current_policy_position(self): + """The position, or rank, of the policy that is the current "effective" policy for a group of assets.""" + return self._preview_data['current_policy']['position'] + + @property + def new_policy_id(self): + """The ID of the policy that will become the new "effective" policy for a group of assets.""" + return self._preview_data['new_policy']['id'] + + @property + def new_policy(self): + """The ``Policy`` object that will become the new "effective" policy for a group of assets.""" + return self._cb.select("Policy", self._preview_data['new_policy']['id']) + + @property + def new_policy_position(self): + """The position, or rank, of the policy that will become the new "effective" policy for a group of assets.""" + return self._preview_data['new_policy']['position'] + + @property + def asset_count(self): + """The number of assets to be affected by the change in their effective policy.""" + return self._preview_data['asset_count'] + + @property + def asset_query(self): + """ + A ``Device`` query which looks up the assets that are to be affected by the change in their effective policy. + + Once the query is created, it can be modified with additional criteria or options before it is executed. + """ + return self._cb.select("Device").where(self._preview_data['asset_query']) + + @property + def assets(self): # pragma: no cover + """ + The list of assets, i.e. ``Device`` objects, to be affected by the change in their effective policy. + + Required Permissions: + device (READ) + """ + return list(self.asset_query) diff --git a/src/cbc_sdk/platform/processes.py b/src/cbc_sdk/platform/processes.py index 5ca5853c0..ec2030fc2 100644 --- a/src/cbc_sdk/platform/processes.py +++ b/src/cbc_sdk/platform/processes.py @@ -55,10 +55,10 @@ class Process(UnrefreshableModel): ``AsyncProcessQuery``. Examples: - # use the Process GUID directly + >>> # use the Process GUID directly >>> process = api.select(Process, "WNEXFKQ7-00050603-0000066c-00000000-1d6c9acb43e29bb") - # use the Process GUID in a where() clause + >>> # use the Process GUID in a where() clause >>> process_query = api.select(Process).where(process_guid= ... 
"WNEXFKQ7-00050603-0000066c-00000000-1d6c9acb43e29bb") >>> process_query_results = list(process_query) @@ -127,7 +127,7 @@ def __str__(self): if attr in self.SHOW_ATTR[top_level]['fields']: try: val = str(self._info[top_level][attr]) - except UnicodeDecodeError: + except UnicodeDecodeError: # pragma: no cover val = repr(self._info[top_level][attr]) lines.append(u"{0:s} {1:>20s}: {2:s}".format(" ", attr, val)) else: @@ -136,7 +136,7 @@ def __str__(self): if attr in self.SHOW_ATTR[top_level]['fields']: try: val = str(item[attr]) - except UnicodeDecodeError: + except UnicodeDecodeError: # pragma: no cover val = repr(item[attr]) lines.append(u"{0:s} {1:>20s}: {2:s}".format(" ", attr, val)) lines.append('') @@ -193,7 +193,7 @@ def __str__(self): if attr in self.SHOW_ATTR['top']: try: val = str(self._info[attr]) - except UnicodeDecodeError: + except UnicodeDecodeError: # pragma: no cover val = repr(self._info[attr]) lines.append(u"{0:s} {1:>20s}: {2:s}".format(" ", attr, val)) @@ -203,7 +203,7 @@ def __str__(self): if attr in self.SHOW_ATTR['children']: try: val = str(child[attr]) - except UnicodeDecodeError: + except UnicodeDecodeError: # pragma: no cover val = repr(child[attr]) lines.append(u"{0:s} {1:>20s}: {2:s}".format(" ", attr, val)) lines.append('') @@ -238,6 +238,11 @@ def __init__(self, cb, model_unique_id=None, initial_data=None, force_init=False super(Process, self).__init__(cb, model_unique_id=model_unique_id, initial_data=initial_data, force_init=force_init, full_doc=full_doc) + def _retrieve_cb_info(self): # pragma: no cover + """Retrieve the detailed information about this object.""" + self._details_timeout = self._cb.credentials.default_timeout + return self._get_detailed_results()._info + @property def summary(self): """Returns organization-specific information about this process.""" @@ -316,6 +321,22 @@ def process_pids(self): else: return None + def deobfuscate_cmdline(self): + """ + Deobfuscates the command line of the process and returns the deobfuscated result. + + Required Permissions: + script.deobfuscation(EXECUTE) + + Returns: + dict: A dict containing information about the obfuscated command line, including the deobfuscated result. + """ + body = {"input": self.process_cmdline[0]} + if not body['input']: + body['input'] = self.get_details()['process_cmdline'][0] + result = self._cb.post_object(f"/tau/v2/orgs/{self._cb.credentials.org_key}/reveal", body) + return result.json() + def events(self, **kwargs): """ Returns a query for events associated with this process's process GUID. @@ -352,7 +373,8 @@ def get_details(self, timeout=0, async_mode=False): org.search.events(CREATE, READ) Args: - timeout (int): Event details request timeout in milliseconds. + timeout (int): Event details request timeout in milliseconds. This value can never be greater than the + configured default timeout. If this value is 0, the configured default timeout is used. async_mode (bool): ``True`` to request details in an asynchronous manner. Returns: @@ -360,7 +382,10 @@ def get_details(self, timeout=0, async_mode=False): retrieve the results. dict: If ``async_mode`` is ``False``. 
""" - self._details_timeout = timeout + if timeout <= 0: + self._details_timeout = self._cb.credentials.default_timeout + else: + self._details_timeout = min(timeout, self._cb.credentials.default_timeout) if not self.process_guid: raise ApiError("Trying to get process details on an invalid process_guid") if async_mode: @@ -370,6 +395,7 @@ def get_details(self, timeout=0, async_mode=False): def _get_detailed_results(self): """Actual search details implementation""" + assert self._details_timeout > 0 args = {"process_guids": [self.process_guid]} url = "/api/investigate/v2/orgs/{}/processes/detail_jobs".format(self._cb.credentials.org_key) query_start = self._cb.post_object(url, body=args) @@ -390,7 +416,7 @@ def _get_detailed_results(self): time.sleep(.5) continue if searchers_completed < searchers_contacted: - if self._details_timeout != 0 and (time.time() * 1000) - submit_time > self._details_timeout: + if (time.time() * 1000) - submit_time > self._details_timeout: timed_out = True break else: @@ -596,7 +622,7 @@ def __init__(self, doc_class, cb): """ super(AsyncProcessQuery, self).__init__(doc_class, cb) self._query_token = None - self._timeout = 0 + self._timeout = cb.credentials.default_timeout self._timed_out = False def timeout(self, msecs): @@ -604,7 +630,8 @@ def timeout(self, msecs): Sets the timeout on a process query. Arguments: - msecs (int): Timeout duration, in milliseconds. + msecs (int): Timeout duration, in milliseconds. This can never be greater than the configured default + timeout. If this is 0, the configured default timeout is used. Returns: AsyncProcessQuery: The modified query object. @@ -612,7 +639,10 @@ def timeout(self, msecs): Example: >>> cb.select(Process).where(process_name="foo.exe").timeout(5000) """ - self._timeout = msecs + if msecs <= 0: + self._timeout = self._cb.credentials.default_timeout + else: + self._timeout = min(msecs, self._cb.credentials.default_timeout) return self def set_rows(self, rows): @@ -663,6 +693,7 @@ def _still_querying(self): Required Permissions: org.search.events(CREATE, READ) """ + assert self._timeout > 0 if not self._query_token: self._submit() @@ -678,7 +709,7 @@ def _still_querying(self): if searchers_contacted == 0: return True if searchers_completed < searchers_contacted: - if self._timeout != 0 and (time.time() * 1000) - self._submit_time > self._timeout: + if (time.time() * 1000) - self._submit_time > self._timeout: self._timed_out = True return False return True @@ -825,7 +856,7 @@ def __init__(self, doc_class, cb): self._query_builder = QueryBuilder() self._query_token = None self._full_init = False - self._timeout = 0 + self._timeout = cb.credentials.default_timeout self._timed_out = False self._time_range = {} @@ -834,7 +865,8 @@ def timeout(self, msecs): Sets the timeout on a process query. Arguments: - msecs (int): Timeout duration, in milliseconds. + msecs (int): Timeout duration, in milliseconds. This can never be greater than the configured default + timeout. If this value is 0, the configured default timeout is used. Returns: SummaryQuery: The modified query object. 
@@ -842,7 +874,10 @@ def timeout(self, msecs): Example: >>> cb.select(Process).where(process_name="foo.exe").timeout(5000) """ - self._timeout = msecs + if msecs <= 0: + self._timeout = self._cb.credentials.default_timeout + else: + self._timeout = min(msecs, self._cb.credentials.default_timeout) return self def set_time_range(self, start=None, end=None, window=None): @@ -926,6 +961,7 @@ def _still_querying(self): Required Permissions: org.search.events(CREATE, READ) """ + assert self._timeout > 0 if not self._query_token: self._submit() @@ -941,7 +977,7 @@ def _still_querying(self): if searchers_contacted == 0: return True if searchers_completed < searchers_contacted: - if self._timeout != 0 and (time.time() * 1000) - self._submit_time > self._timeout: + if (time.time() * 1000) - self._submit_time > self._timeout: self._timed_out = True return False return True diff --git a/src/cbc_sdk/rest_api.py b/src/cbc_sdk/rest_api.py index 2927f5198..3a075df80 100644 --- a/src/cbc_sdk/rest_api.py +++ b/src/cbc_sdk/rest_api.py @@ -68,7 +68,7 @@ def __init__(self, *args, **kwargs): proxy_session (requests.session.Session): Proxy session to be used for cookie persistence, connection pooling, and configuration. Default is ``None`` (use the standard session). thread_pool_count (int): The number of threads to create for asynchronous queries. Defaults to 3. - timeout (float): The timeout to use for for API requests. Default is ``None`` (no timeout). + timeout (float): The timeout to use for for API connection requests. Default is ``None`` (no timeout). token (str): The API token to use when accessing the Carbon Black Cloud. url (str): The URL of the Carbon Black Cloud provider to use. """ diff --git a/src/tests/unit/audit_remediation/test_audit_remediation_base.py b/src/tests/unit/audit_remediation/test_audit_remediation_base.py index 480c0e20d..146c17e9a 100644 --- a/src/tests/unit/audit_remediation/test_audit_remediation_base.py +++ b/src/tests/unit/audit_remediation/test_audit_remediation_base.py @@ -18,6 +18,7 @@ ASYNC_BROKEN_1, ASYNC_BROKEN_2, ASYNC_BROKEN_3, ASYNC_FACETING) from tests.unit.fixtures.platform.mock_jobs import JOB_DETAILS_1 +from tests.unit.fixtures.audit_remediation.mock_scroll import GET_SCROLL_RESULTS, SINGLE_RESULT log = logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, filename='log.txt') @@ -123,7 +124,8 @@ def test_result_query_criteria(cbcsdk_mock): def test_result_query_update_criteria(cbcsdk_mock): """Testing the public update_criteria() function accessing private _update_criteria().""" api = cbcsdk_mock.api - query = api.select(Result).run_id(2).update_criteria("my.key.dot.notation", ["criteria_val_1"]) + query = api.select(Result).run_id("qcu6wghci1oqfrsgvcrrm1o71bzcy7cx") \ + .update_criteria("my.key.dot.notation", ["criteria_val_1"]) query = query.update_criteria("my.key.dot.notation", ["criteria_val_2"]) assert query._build_request(start=0, rows=100) == {"criteria": { "my.key.dot.notation": ["criteria_val_1", "criteria_val_2"] @@ -151,8 +153,8 @@ def test_facet_query_criteria(cbcsdk_mock): def test_result_facet_query_update_criteria(cbcsdk_mock): """Testing the public update_criteria() function accessing private _update_criteria().""" api = cbcsdk_mock.api - query = api.select(ResultFacet).run_id(2).update_criteria("my.key.dot.notation", - ["criteria_val_1", "criteria_val_2"]) + query = api.select(ResultFacet).run_id("qcu6wghci1oqfrsgvcrrm1o71bzcy7cx") \ + .update_criteria("my.key.dot.notation", ["criteria_val_1", "criteria_val_2"]) 
     assert query._build_request(rows=100) == {"criteria": {
         "my.key.dot.notation": ["criteria_val_1", "criteria_val_2"]
     }, "terms": {"fields": [], "rows": 100}}
@@ -478,3 +480,72 @@ def test_run_async_faceting_query(cbcsdk_mock):
     assert len(result) == 1
     assert result[0].field == 'fields.version'
     assert len(result[0].values) == 4
+
+
+def test_result_set_run_ids(cbcsdk_mock):
+    """Testing set_run_ids"""
+    api = cbcsdk_mock.api
+    query = api.select(Result).set_run_ids(["abcdefghijklmnopqrstuvwxyz123456", "fckjyssfusuuutlkpocky82luvnl0sol"])
+    assert query._criteria["run_id"] == ["abcdefghijklmnopqrstuvwxyz123456", "fckjyssfusuuutlkpocky82luvnl0sol"]
+
+
+def test_result_set_time_received(cbcsdk_mock):
+    """Testing set_time_received"""
+    api = cbcsdk_mock.api
+    query = api.select(Result).set_time_received(range="-3h")
+    assert query._criteria["time_received"] == {"range": "-3h"}
+
+    query.set_time_received(start="2023-12-10T00:00:00.000Z", end="2023-12-11T00:00:00.000Z")
+    assert query._criteria["time_received"] == {
+        "start": "2023-12-10T00:00:00.000Z",
+        "end": "2023-12-11T00:00:00.000Z"
+    }
+
+    with pytest.raises(ApiError):
+        query.set_time_received(start="2023-12-10T00:00:00.000Z", end="2023-12-11T00:00:00.000Z", range="-3h")
+
+
+def test_result_scroll(cbcsdk_mock):
+    """Testing ResultQuery scroll"""
+    cbcsdk_mock.mock_request("POST", "/livequery/v1/orgs/test/runs/results/_scroll",
+                             GET_SCROLL_RESULTS(100, 200, 100))
+
+    api = cbcsdk_mock.api
+    query = api.select(Result).set_time_received(range="-3h")
+
+    results = query.scroll(100)
+
+    assert query.num_remaining == 100
+    assert query._search_after == "MTcwMjMyMTM2MDU3OSwyMT"
+
+    def on_post(url, body, **kwargs):
+        """Test 2nd scroll request"""
+        assert body == {
+            "criteria": {
+                "time_received": {"range": "-3h"}},
+            "rows": 10000,
+            "search_after": "MTcwMjMyMTM2MDU3OSwyMT"
+        }
+        return GET_SCROLL_RESULTS(100, 200, 0)
+
+    cbcsdk_mock.mock_request("POST", "/livequery/v1/orgs/test/runs/results/_scroll",
+                             on_post)
+
+    results.extend(query.scroll(20000))
+
+    assert len(results) == 200
+
+    assert query.scroll(100) == []
+
+
+def test_result_to_json(cbcsdk_mock):
+    """Testing Result to_json"""
+    cbcsdk_mock.mock_request("POST", "/livequery/v1/orgs/test/runs/results/_scroll",
+                             GET_SCROLL_RESULTS(1, 1, 1))
+
+    api = cbcsdk_mock.api
+    query = api.select(Result).set_time_received(range="-3h")
+
+    results = query.scroll(1)
+
+    assert results[0].to_json() == SINGLE_RESULT
diff --git a/src/tests/unit/base/test_base_models.py b/src/tests/unit/base/test_base_models.py
index 73b93feae..85969ca8e 100644
--- a/src/tests/unit/base/test_base_models.py
+++ b/src/tests/unit/base/test_base_models.py
@@ -544,3 +544,12 @@ def test_str_attr_line(cb):
     assert rendering[4] == ' [1]: [_TestBaseModel object]:'
     assert rendering[5] == ' id: 128'
     assert rendering[6] == ''
+
+
+def test_to_json(cbcsdk_mock):
+    """Test the to_json method of NewBaseModel"""
+    api = cbcsdk_mock.api
+    cbcsdk_mock.mock_request("GET", "/testing_only/v1/stubobjects/30241", STUBOBJECT_GET_RESP)
+    stub = StubObject(api, 30241)
+    stub.refresh()
+    assert stub.to_json() == STUBOBJECT_GET_RESP
diff --git a/src/tests/unit/credential_providers/test_aws_secrets_manager.py b/src/tests/unit/credential_providers/test_aws_secrets_manager.py
index 0dc5522b0..d3b49b8db 100644
--- a/src/tests/unit/credential_providers/test_aws_secrets_manager.py
+++ b/src/tests/unit/credential_providers/test_aws_secrets_manager.py
@@ -52,7 +52,8 @@ def test_aws_getting_credentials(monkeypatch):
         'csp_api_token': None,
'csp_oauth_app_id': None, 'csp_oauth_app_secret': None, - 'csp_url_override': 'https://console.cloud.vmware.com' + 'csp_url_override': 'https://console.cloud.vmware.com', + 'default_timeout': 256000 } monkeypatch.setattr(Session, "client", ClientMock) diff --git a/src/tests/unit/credential_providers/test_registry.py b/src/tests/unit/credential_providers/test_registry.py index 2de02d601..43699f899 100755 --- a/src/tests/unit/credential_providers/test_registry.py +++ b/src/tests/unit/credential_providers/test_registry.py @@ -137,6 +137,41 @@ def test_read_bool_exceptions(monkeypatch, mox): mox.VerifyAll() +@pytest.mark.parametrize('key, return_val, check_val', [ + ('Alpha', (0, REG_DWORD), 0), + ('Bravo', (5, REG_DWORD), 5), + ('Charlie', None, None) +]) +def test_read_int(monkeypatch, mox, key, return_val, check_val): + """Test reading integer values from the registry.""" + monkeypatch.setattr(sys, "platform", "win32") + sut = RegistryCredentialProvider() + mox.StubOutWithMock(sut, '_read_value') + stub_key = StubKeyObject() + sut._read_value(stub_key, key).AndReturn(return_val) + mox.ReplayAll() + assert sut._read_int(stub_key, key) == check_val + mox.VerifyAll() + + +def test_read_int_exceptions(monkeypatch, mox): + """Test reading integer values from the registry, in ways that generate exceptions.""" + monkeypatch.setattr(sys, "platform", "win32") + sut = RegistryCredentialProvider() + mox.StubOutWithMock(sut, '_read_value') + stub_key = StubKeyObject() + sut._read_value(stub_key, "Alpha").AndReturn(("!Funky!Stuff!", REG_SZ)) + sut._read_value(stub_key, "Bravo").AndRaise(CredentialError("Unable to read")) + mox.ReplayAll() + with pytest.raises(CredentialError) as e1: + sut._read_int(stub_key, "Alpha") + assert "not of integer type" in str(e1.value) + with pytest.raises(CredentialError) as e2: + sut._read_int(stub_key, "Bravo") + assert "Unable to read" in str(e2.value) + mox.VerifyAll() + + def test_read_credentials(monkeypatch, mox): """Test reading an entire Credentials object from the registry.""" monkeypatch.setattr(sys, "platform", "win32") @@ -157,6 +192,7 @@ def test_read_credentials(monkeypatch, mox): sut._read_value(stub_key, "csp_oauth_app_secret").AndReturn(("SECRET", REG_SZ)) sut._read_value(stub_key, "csp_api_token").AndReturn(("API TOKEN", REG_SZ)) sut._read_value(stub_key, "csp_url_override").AndReturn(("http://csp.com", REG_SZ)) + sut._read_value(stub_key, "default_timeout").AndReturn((256000, REG_DWORD)) mox.ReplayAll() creds = sut._read_credentials(stub_key) mox.VerifyAll() @@ -174,6 +210,7 @@ def test_read_credentials(monkeypatch, mox): assert creds.csp_oauth_app_secret == "SECRET" assert creds.csp_api_token == "API TOKEN" assert creds.csp_url_override == "http://csp.com" + assert creds.default_timeout == 256000 def test_read_credentials_defaults(monkeypatch, mox): @@ -196,6 +233,7 @@ def test_read_credentials_defaults(monkeypatch, mox): sut._read_value(stub_key, "csp_oauth_app_secret").AndReturn(None) sut._read_value(stub_key, "csp_api_token").AndReturn(None) sut._read_value(stub_key, "csp_url_override").AndReturn(None) + sut._read_value(stub_key, "default_timeout").AndReturn(None) mox.ReplayAll() creds = sut._read_credentials(stub_key) mox.VerifyAll() @@ -239,6 +277,7 @@ def test_get_credentials(monkeypatch, mox): sut._read_value(key2, "csp_oauth_app_secret").AndReturn(("SECRET", REG_SZ)) sut._read_value(key2, "csp_api_token").AndReturn(("API-TOKEN", REG_SZ)) sut._read_value(key2, "csp_url_override").AndReturn(("http://csp.com", REG_SZ)) + sut._read_value(key2, 
"default_timeout").AndReturn((256000, REG_DWORD)) mox.ReplayAll() creds = sut.get_credentials('default') assert creds.url == "http://example.com" @@ -255,6 +294,7 @@ def test_get_credentials(monkeypatch, mox): assert creds.csp_oauth_app_secret == "SECRET" assert creds.csp_api_token == "API-TOKEN" assert creds.csp_url_override == "http://csp.com" + assert creds.default_timeout == 256000 creds2 = sut.get_credentials('default') assert creds2 is creds mox.VerifyAll() diff --git a/src/tests/unit/endpoint_standard/test_endpoint_standard_enriched_events.py b/src/tests/unit/endpoint_standard/test_endpoint_standard_enriched_events.py index e1634aaf8..9771ad9bc 100644 --- a/src/tests/unit/endpoint_standard/test_endpoint_standard_enriched_events.py +++ b/src/tests/unit/endpoint_standard/test_endpoint_standard_enriched_events.py @@ -274,9 +274,15 @@ def test_enriched_event_timeout(cbcsdk_mock): """Testing EnrichedEventQuery.timeout().""" api = cbcsdk_mock.api query = api.select(EnrichedEvent).where("event_id:some_id") - assert query._timeout == 0 + assert query._timeout == 300000 query.timeout(msecs=500) assert query._timeout == 500 + query.timeout(msecs=999999) + assert query._timeout == 300000 + query.timeout(msecs=700) + assert query._timeout == 700 + query.timeout(msecs=0) + assert query._timeout == 300000 def test_enriched_event_timeout_error(cbcsdk_mock): diff --git a/src/tests/unit/endpoint_standard/test_endpoint_standard_enriched_events_facet.py b/src/tests/unit/endpoint_standard/test_endpoint_standard_enriched_events_facet.py index d9649dc5a..73c2adacf 100644 --- a/src/tests/unit/endpoint_standard/test_endpoint_standard_enriched_events_facet.py +++ b/src/tests/unit/endpoint_standard/test_endpoint_standard_enriched_events_facet.py @@ -109,9 +109,15 @@ def test_enriched_event_facet_timeout(cbcsdk_mock): """Testing EnrichedEventQuery.timeout().""" api = cbcsdk_mock.api query = api.select(EnrichedEventFacet).where("process_name:some_name").add_facet_field("process_name") - assert query._timeout == 0 + assert query._timeout == 300000 query.timeout(msecs=500) assert query._timeout == 500 + query.timeout(msecs=999999) + assert query._timeout == 300000 + query.timeout(msecs=700) + assert query._timeout == 700 + query.timeout(msecs=0) + assert query._timeout == 300000 def test_enriched_event_facet_timeout_error(cbcsdk_mock): diff --git a/src/tests/unit/enterprise_edr/test_auth_events.py b/src/tests/unit/enterprise_edr/test_auth_events.py index 610ae9844..412bc30e6 100644 --- a/src/tests/unit/enterprise_edr/test_auth_events.py +++ b/src/tests/unit/enterprise_edr/test_auth_events.py @@ -411,9 +411,15 @@ def test_auth_event_timeout(cbcsdk_mock): """Testing AuthEventQuery.timeout().""" api = cbcsdk_mock.api query = api.select(AuthEvent).where("event_id:some_id") - assert query._timeout == 0 + assert query._timeout == 300000 query.timeout(msecs=500) assert query._timeout == 500 + query.timeout(msecs=999999) + assert query._timeout == 300000 + query.timeout(msecs=700) + assert query._timeout == 700 + query.timeout(msecs=0) + assert query._timeout == 300000 def test_auth_event_timeout_error(cbcsdk_mock): @@ -739,9 +745,15 @@ def test_auth_event_facet_timeout(cbcsdk_mock): .where("process_name:some_name") .add_facet_field("process_name") ) - assert query._timeout == 0 + assert query._timeout == 300000 query.timeout(msecs=500) assert query._timeout == 500 + query.timeout(msecs=999999) + assert query._timeout == 300000 + query.timeout(msecs=700) + assert query._timeout == 700 + query.timeout(msecs=0) + assert 
query._timeout == 300000 def test_auth_event_facet_timeout_error(cbcsdk_mock): diff --git a/src/tests/unit/fixtures/audit_remediation/mock_scroll.py b/src/tests/unit/fixtures/audit_remediation/mock_scroll.py new file mode 100644 index 000000000..3c4571589 --- /dev/null +++ b/src/tests/unit/fixtures/audit_remediation/mock_scroll.py @@ -0,0 +1,33 @@ +"""Mocks for Live Query Result Scroll""" + +SINGLE_RESULT = { + "id": "oc5c5q9yc1mv107wuaxj6xqpmaoezwrh", + "device": { + "id": 21665421, + "name": "psc-auto-centos75", + "policy_id": 17567367, + "policy_name": "0", + "os": "LINUX" + }, + "status": "matched", + "time_received": "2023-12-11T19:02:40.579Z", + "device_message": "", + "fields": { + "cmdline": "/usr/sbin/NetworkManager --no-daemon", + "cwd": "/", + "name": "NetworkManager", + "on_disk": 1, + "path": "/usr/sbin/NetworkManager" + } +} + + +def GET_SCROLL_RESULTS(rows, num_found, num_remaining): + """Generate results response based on num_remaining""" + return { + "org_key": "test", + "num_found": num_found, + "num_remaining": num_remaining, + "search_after": "MTcwMjMyMTM2MDU3OSwyMT" if num_remaining > 0 else "", + "results": [SINGLE_RESULT for _ in range(rows)] + } diff --git a/src/tests/unit/fixtures/enterprise_edr/mock_threatintel.py b/src/tests/unit/fixtures/enterprise_edr/mock_threatintel.py index 778927a8a..f4bb6a0fe 100644 --- a/src/tests/unit/fixtures/enterprise_edr/mock_threatintel.py +++ b/src/tests/unit/fixtures/enterprise_edr/mock_threatintel.py @@ -1266,6 +1266,21 @@ "065fb68d-42a8-4b2e-8f91-17f925f54356" ] +GET_WATCHLIST_OBJECT_RESP = { + "alerts_enabled": True, + "classifier": { + "key": "feed_id", + "value": "vnbrUmClRh2Mh8398QtJww" + }, + "create_timestamp": 1657561064, + "description": "scale and performance automation test feed", + "id": "mnbvc098766HN60hatQMQ", + "last_update_timestamp": 1692286217, + "name": "AMSI Threat Intelligence", + "report_ids": None, + "tags_enabled": True +} + @pytest.fixture(scope="function") def get_watchlist_report(): diff --git a/src/tests/unit/fixtures/platform/mock_alerts_v7.py b/src/tests/unit/fixtures/platform/mock_alerts_v7.py index 944bc7565..fb4c033b4 100644 --- a/src/tests/unit/fixtures/platform/mock_alerts_v7.py +++ b/src/tests/unit/fixtures/platform/mock_alerts_v7.py @@ -105,8 +105,8 @@ "device_policy_id": 112221, "device_os": "WINDOWS", "device_os_version": "Windows Server 2019 x64", "device_username": "rahul.gopi@devo.com", - "device_location": "UNKNOWN", "device_external_ip": "34.234.170.45", - "device_internal_ip": "10.0.14.120", + "device_location": "UNKNOWN", "device_external_ip": "10.10.10.10", + "device_internal_ip": "10.10.10.10", "mdr_alert": "false", "mdr_alert_notes_present": "false", "mdr_threat_notes_present": "false", "report_id": "MLRtPcpQGKFh5OE4BT3tQ-49760e2e-c1e4-42e9-8157-4084ff002bcc", @@ -586,3 +586,1169 @@ "type": "WATCHLIST", "workflow": {"status": "OPEN"} } + +GET_ALERT_OBFUSCATED_CMDLINE = { + "org_key": "ABCD1234", + "alert_url": "https://defense.conferdeploy.net/alerts?s[c][query_string]= \ + id:52fa009d-e2d1-4118-8a8d-04f521ae66aa&orgKey=ABCD1234", + "id": "12ab345cd6-e2d1-4118-8a8d-04f521ae66aa", "type": "WATCHLIST", + "backend_timestamp": "2023-04-14T21:30:40.570Z", "user_update_timestamp": None, + "backend_update_timestamp": "2023-04-14T21:30:40.570Z", + "detection_timestamp": "2023-04-14T21:27:14.719Z", + "first_event_timestamp": "2023-04-14T21:21:42.193Z", + "last_event_timestamp": "2023-04-14T21:21:42.193Z", + "severity": 8, + "reason": "Process infdefaultinstall.exe was detected by the 
report\ + \"Defense Evasion - \" in 6 watchlists", + "reason_code": "05696200-88e6-3691-a1e3-8d9a64dbc24e:7828aec8-8502-3a43-ae68-41b5050dab5b", + "threat_id": "0569620088E6669121E38D9A64DBC24E", "primary_event_id": "-7RlZFHcSGWKSrF55B_4Ig-0", + "policy_applied": "NOT_APPLIED", "run_state": "RAN", "sensor_action": "ALLOW", + "workflow": {"change_timestamp": "2023-04-14T21:30:40.570Z", "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION", "closure_reason": "NO_REASON", "status": "OPEN"}, + "determination": None, + "tags": ["tag1", "tag2"], "alert_notes_present": False, "threat_notes_present": False, + "is_updated": False, + "device_id": 18118174, "device_name": "demo-machine", "device_uem_id": "", + "device_target_value": "LOW", + "device_policy": "123abcde-c21b-4d64-9e3e-53595ef9c7af", "device_policy_id": 1234567, + "device_os": "WINDOWS", + "device_os_version": "Windows 10 x64 SP: 1", "device_username": "demouser@demoorg.com", + "device_location": "UNKNOWN", "device_external_ip": "1.2.3.4", "mdr_alert": False, + "report_id": "oJFtoawGS92fVMXlELC1Ow-b4ee93fc-ec58-436a-a940-b4d33a613513", + "report_name": "Defense Evasion - Signed Binary Proxy Execution - InfDefaultInstall", + "report_description": "\n\nThreat:\nThis behavior may be abused by adversaries to execute malicious\ + files that could bypass application whitelisting and signature validation on systems.\n\nFalse \ + Positives:\nSome environments may legitimate use this, but should be rare.\n\nScore:\n85", + "report_tags": ["attack", "attackframework", "threathunting"], + "report_link": "https://attack.mitre.org/wiki/Technique/T1218", + "ioc_id": "b4ee93fc-ec58-436a-a940-b4d33a613513-0", + "ioc_hit": "((process_name:InfDefaultInstall.exe)) -enriched:true", + "watchlists": [{"id": "9x0timurQkqP7FBKX4XrUw", "name": "Carbon Black Advanced Threats"}], + "process_guid": "ABC12345-000309c2-00000478-00000000-1d6a1c1f2b02805", "process_pid": 10980, + "process_name": "powershell.exe", + "process_sha256": "1a2345cd88666a458f804e5d0fe925a9f55cf016733458c58c1980addc44cd774", + "process_md5": "12c34567894a49f13193513b0138f72a9", "process_effective_reputation": "LOCAL_WHITE", + "process_reputation": "NOT_LISTED", + "process_cmdline": "powershell.exe -encodedcommand VwByAGkAdABlAC0ATwB1AHQAcAB1AHQAIAAiAE4AbwAgAG0AYQB0AHQAZQByACAAaABvAHcAIAB0AGgAaQBuACAAeQBvAHUAIABzAGwAaQBjAGUAIABpAHQALAAgAGkAdAAnAHMAIABzAHQAaQBsAGwAIABiAGEAbABvAG4AZQB5AC4AIgA=", # noqa: E501 + "process_username": "DEMO\\DEMOUSER", "process_issuer": "Demo Code Signing CA - G2", + "process_publisher": "Demo Test Authority", "childproc_guid": "", "childproc_username": "", + "childproc_cmdline": "", + "ml_classification_final_verdict": "NOT_ANOMALOUS", "ml_classification_global_prevalence": "LOW", + "ml_classification_org_prevalence": "LOW" +} + +ALERT_DEOBFUSCATE_CMDLINE_REQUEST = { + "input": "powershell.exe -encodedcommand VwByAGkAdABlAC0ATwB1AHQAcAB1AHQAIAAiAE4AbwAgAG0AYQB0AHQAZQByACAAaABvAHcAIAB0AGgAaQBuACAAeQBvAHUAIABzAGwAaQBjAGUAIABpAHQALAAgAGkAdAAnAHMAIABzAHQAaQBsAGwAIABiAGEAbABvAG4AZQB5AC4AIgA=" # noqa: E501 +} + +ALERT_DEOBFUSCATE_CMDLINE_RESPONSE = { + "original_code": "Write-Output \"No matter how thin you slice it, it's still baloney.\"\n", + "deobfuscated_code": "Write-Output \"No matter how thin you slice it, it's still baloney.\"\n", + "identities": [ + "Write-Output" + ], + "strings": [ + "No matter how thin you slice it, it's still baloney." 
+ ], + "obfuscation_level": 0.0 +} + +GROUP_SEARCH_ALERT_REQUEST = { + "group_by": { + "field": "THREAT_ID" + }, + "time_range": { + "range": "-10d" + }, + "criteria": { + "type": [ + "WATCHLIST" + ], + "minimum_severity": 1 + }, + "rows": 1, + "sort": [ + { + "field": "count", + "order": "DESC" + } + ] +} + +GROUP_SEARCH_ALERT_RESPONSE = { + "num_found": 6, + "num_available": 6, + "results": [ + { + "count": 1167, + "workflow_states": { + "OPEN": 1167 + }, + "determination_values": { + "NONE": 1167 + }, + "ml_classification_final_verdicts": {}, + "first_alert_timestamp": "2023-10-20T17:57:41.734Z", + "last_alert_timestamp": "2023-10-30T17:19:22.943Z", + "highest_severity": 1, + "policy_applied": "NOT_APPLIED", + "threat_notes_present": False, + "tags": [], + "device_count": 11, + "workload_count": 0, + "most_recent_alert": { + "org_key": "ABCD1234", + "alert_url": "defense.conferdeploy.net/alerts?s[c][query_string]=\ + id:887e6bbc-6224-4f36-ad37-084038b7fcab&orgKey=ABC12345", + "id": "d6f05ce5-23ad-4cf0-a4d8-ee564396b2d1", + "type": "WATCHLIST", + "backend_timestamp": "2023-10-30T17:32:02.851Z", + "user_update_timestamp": None, + "backend_update_timestamp": "2023-10-30T17:32:02.851Z", + "detection_timestamp": "2023-10-30T17:30:04.587Z", + "first_event_timestamp": "2023-10-30T17:19:22.943Z", + "last_event_timestamp": "2023-10-30T17:19:22.943Z", + "severity": 1, + "reason": "Process cmd.exe was detected by the report \"scale and performance test report\" in " + "watchlist \"perf_automation_feed_qrcialhx\"", + "reason_code": "5319dc28-4f8b-3a9f-84c6-e045c5e186ff:920e0ded-e95b-3d77-8eaa-e28bdaa133f2", + "threat_id": "5319DC284F8B2A9FC4C6E045C5E186FF", + "primary_event_id": "o6NwqERTQf6eYlD0kvpRLw-0", + "policy_applied": "NOT_APPLIED", + "run_state": "RAN", + "sensor_action": "ALLOW", + "workflow": { + "change_timestamp": "2023-10-30T17:32:02.851Z", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION", + "closure_reason": "NO_REASON", + "status": "OPEN" + }, + "determination": { + "change_timestamp": "2023-10-30T17:32:02.851Z", + "value": "NONE", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION" + }, + "tags": None, + "alert_notes_present": False, + "threat_notes_present": False, + "asset_id": None, + "is_updated": False, + "device_id": 18118176, + "device_name": "pscr-test-01-1677785033.788122-22", + "device_uem_id": "", + "device_target_value": "LOW", + "device_policy": "Pscr SE Testing", + "device_policy_id": 465946, + "device_os": "WINDOWS", + "device_os_version": "Windows 10 x64 SP: 1", + "device_username": "pscr-test-01-1677785033.788122-22@carbonblack.com", + "device_location": "UNKNOWN", + "device_external_ip": "10.10.10.10", + "mdr_alert": False, + "mdr_alert_notes_present": False, + "mdr_threat_notes_present": False, + "report_id": "vnbrUmClRh2Mh8398QtJww-scale_perf_automation_report01_qrcialhx", + "report_name": "scale and performance test report", + "report_description": "scale and performance test description", + "report_tags": [], + "ioc_id": "scale_perf_automation_report01_ioc01_qrcialhx", + "ioc_hit": "process_name:cmd.exe", + "watchlists": [ + { + "id": "gSpaq0J9QB1qRY3lEdAw", + "name": "perf_automation_feed_qrcialhx" + } + ], + "process_guid": "ABCD1234-01147620-00780012-00000000-19db1ded53e8000", + "process_pid": 7864338, + "process_name": "cmd.exe", + "process_sha256": "bb5743ff9ce542b7018d712597b2f3e2868e89feaf8d76253324644fbeda1899", + "process_md5": "0a56e038d66da45947f8fdf130aef2d5", + "process_effective_reputation": "LOCAL_WHITE", + 
"process_reputation": "NOT_LISTED", + "process_cmdline": "cmd.exe /c InfDefaultInstall.exe C:\\Users\\bit9qa\\AtomicRedTeam\\" + "atomic-red-team-vmware-develop\\atomics\\T1218\\src\\Infdefaultinstall.inf", + "process_username": "NT AUTHORITY\\SYSTEM", + "process_issuer": [ + "Moravec Code Signing CA - G2" + ], + "process_publisher": [ + "Moravec Test Authority" + ], + "childproc_guid": "", + "childproc_username": "", + "childproc_cmdline": "" + } + }, + { + "count": 623, + "workflow_states": { + "OPEN": 623 + }, + "determination_values": { + "NONE": 623 + }, + "ml_classification_final_verdicts": {}, + "first_alert_timestamp": "2023-10-20T19:05:14.179Z", + "last_alert_timestamp": "2023-10-30T17:27:55.845Z", + "highest_severity": 5, + "policy_applied": "NOT_APPLIED", + "threat_notes_present": False, + "tags": [], + "device_count": 5, + "workload_count": 0, + "most_recent_alert": { + "org_key": "ABCD1234", + "alert_url": "defense.conferdeploy.net/alerts?s[c][query_string]=\ + id:887e6bbc-6224-4f36-ad37-084038b7fcab&orgKey=ABC12345", + "id": "9ae95e50-93a2-4b84-b6b0-0442be20b690", + "type": "WATCHLIST", + "backend_timestamp": "2023-10-30T17:36:05.423Z", + "user_update_timestamp": None, + "backend_update_timestamp": "2023-10-30T17:36:05.423Z", + "detection_timestamp": "2023-10-30T17:35:16.949Z", + "first_event_timestamp": "2023-10-30T17:27:55.845Z", + "last_event_timestamp": "2023-10-30T17:27:55.845Z", + "severity": 5, + "reason": "Process trustedinstaller.exe was detected by the report \"mdr-th-test-r-1\" in watchlist " + "\"mdr-th-test-1\"", + "reason_code": "daa13aef-606f-3d75-a123-f8169b1c8a91:caf657fc-2aa9-3f4a-ad4b-9f41faa8cb30", + "threat_id": "DAA13AEF606F1D752123F8169B1C8A91", + "primary_event_id": "vl8Z5QbSQ5qmFs19P2S-gw-0", + "policy_applied": "NOT_APPLIED", + "run_state": "RAN", + "sensor_action": "ALLOW", + "workflow": { + "change_timestamp": "2023-10-30T17:36:05.423Z", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION", + "closure_reason": "NO_REASON", + "status": "OPEN" + }, + "determination": { + "change_timestamp": "2023-10-30T17:36:05.423Z", + "value": "NONE", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION" + }, + "tags": None, + "alert_notes_present": False, + "threat_notes_present": False, + "asset_id": None, + "is_updated": False, + "device_id": 18919907, + "device_name": "DO-NOT-UPGRADE-3DOT9-1", + "device_uem_id": "", + "device_target_value": "LOW", + "device_policy": "Pscr SE Testing", + "device_policy_id": 465946, + "device_os": "WINDOWS", + "device_os_version": "Windows 10 x64", + "device_location": "UNKNOWN", + "device_external_ip": "10.10.10.10", + "device_internal_ip": "10.10.10.10", + "mdr_alert": False, + "mdr_alert_notes_present": False, + "mdr_threat_notes_present": False, + "report_id": "qzZl6z5WRjiyazX3aZtiiQ", + "report_name": "mdr-th-test-r-1", + "report_tags": [], + "ioc_id": "2614a883-1c0d-4ece-92b8-f733c7dec0a3", + "ioc_hit": "(process_name:trustedinstaller.exe)", + "watchlists": [ + { + "id": "tUKo4HPQYWVqZlYhnUTSw", + "name": "mdr-th-test-1" + } + ], + "process_guid": "ABCD1234-0120b1e3-0000062c-00000000-1da0b5622b36c32", + "process_pid": 1580, + "process_name": "c:\\windows\\servicing\\trustedinstaller.exe", + "process_sha256": "2a47e31b708c2ab1d0b4a40802b56c49505361ffb275e4b4c14370b3bfc12245", + "process_md5": "9ab25e301dac8a8f6cf14d51e7284545", + "process_effective_reputation": "ADAPTIVE_WHITE_LIST", + "process_reputation": "NOT_LISTED", + "process_cmdline": "C:\\WINDOWS\\servicing\\TrustedInstaller.exe", + 
"process_username": "NT AUTHORITY\\SYSTEM", + "process_issuer": [ + "Microsoft Windows Production PCA 2011" + ], + "process_publisher": [ + "Microsoft Windows" + ], + "parent_guid": "ABCD1234-0120b1e3-000002c0-00000000-1d9fbf60e8b2a59", + "parent_pid": 704, + "parent_name": "c:\\windows\\system32\\services.exe", + "parent_sha256": "f016360c75e8250af691929082ba2066078fba4e84eac3d496e4eda9a0b6ec62", + "parent_md5": "f26f9b26e933078756832b864eb627b7", + "parent_effective_reputation": "LOCAL_WHITE", + "parent_reputation": "NOT_LISTED", + "parent_cmdline": "C:\\WINDOWS\\system32\\services.exe", + "parent_username": "NT AUTHORITY\\SYSTEM", + "childproc_guid": "", + "childproc_username": "", + "childproc_cmdline": "" + } + }, + { + "count": 531, + "workflow_states": { + "OPEN": 531 + }, + "determination_values": { + "NONE": 531 + }, + "ml_classification_final_verdicts": {}, + "first_alert_timestamp": "2023-10-20T17:57:41.672Z", + "last_alert_timestamp": "2023-10-30T14:56:59.838Z", + "highest_severity": 5, + "policy_applied": "NOT_APPLIED", + "threat_notes_present": True, + "tags": [], + "device_count": 5, + "workload_count": 0, + "most_recent_alert": { + "org_key": "ABCD1234", + "alert_url": "defense.conferdeploy.net/alerts?s[c][query_string]=\ + id:887e6bbc-6224-4f36-ad37-084038b7fcab&orgKey=ABC12345", + "id": "1d2ada91-13c8-4d8f-8d13-fb3a8f5a938b", + "type": "WATCHLIST", + "backend_timestamp": "2023-10-30T15:07:02.935Z", + "user_update_timestamp": None, + "backend_update_timestamp": "2023-10-30T15:07:02.935Z", + "detection_timestamp": "2023-10-30T15:04:21.445Z", + "first_event_timestamp": "2023-10-30T14:56:59.838Z", + "last_event_timestamp": "2023-10-30T14:56:59.838Z", + "severity": 5, + "reason": "Process dllhost.exe was detected by the report \"test-wl-r-567\" in watchlist " + "\"test-wl-g-567\"", + "reason_code": "1b32b7cf-7c3d-30f1-97b4-6ec2e39530c9:627bbdfe-55a7-3100-89bc-25d618fb9684", + "threat_id": "1B32B7CF7C3D40F117B46EC2E39530C9", + "primary_event_id": "kte8_LXBTCurOS1NRFkNcw-0", + "policy_applied": "NOT_APPLIED", + "run_state": "RAN", + "sensor_action": "ALLOW", + "workflow": { + "change_timestamp": "2023-10-30T15:07:02.935Z", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION", + "closure_reason": "NO_REASON", + "status": "OPEN" + }, + "determination": { + "change_timestamp": "2023-10-30T15:07:02.935Z", + "value": "NONE", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION" + }, + "tags": None, + "alert_notes_present": False, + "threat_notes_present": True, + "asset_id": None, + "is_updated": False, + "device_id": 18919907, + "device_name": "DO-NOT-UPGRADE-3DOT9-1", + "device_uem_id": "", + "device_target_value": "LOW", + "device_policy": "Pscr SE Testing", + "device_policy_id": 465946, + "device_os": "WINDOWS", + "device_os_version": "Windows 10 x64", + "device_location": "UNKNOWN", + "device_external_ip": "10.10.10.10", + "device_internal_ip": "10.10.10.10", + "mdr_alert": False, + "mdr_alert_notes_present": False, + "mdr_threat_notes_present": False, + "report_id": "Q0O2FxEWSy2fSSYxEs2Pg", + "report_name": "test-wl-r-567", + "report_tags": [], + "ioc_id": "529de965-e1f6-4e7d-a37e-9e392da29740", + "ioc_hit": "(process_name:dllhost.exe)", + "watchlists": [ + { + "id": "6m1NPFvAR9cN183DNEEOQ", + "name": "test-wl-g-567" + } + ], + "process_guid": "ABCD1234-0120b1e3-00001e3c-00000000-1d9fbf6bbaf6d5d", + "process_pid": 7740, + "process_name": "c:\\windows\\system32\\dllhost.exe", + "process_sha256": 
"8477a5238c237df3ab0454cfef3df7d82162d3c72f8325a840c02558aa8b3e20", + "process_md5": "e3cd542b90fe84453ef3400278eb4d9c", + "process_effective_reputation": "ADAPTIVE_WHITE_LIST", + "process_reputation": "NOT_LISTED", + "process_cmdline": "C:\\WINDOWS\\system32\\DllHost.exe " + "/Processid:{973D20D7-562D-44B9-B70B-5A0F49CCDF3F}", + "process_username": "DO-NOT-UPGRADE-\\bit9qa", + "process_issuer": [ + "Microsoft Windows Production PCA 2011" + ], + "process_publisher": [ + "Microsoft Windows" + ], + "parent_guid": "ABCD1234-0120b1e3-0000033c-00000000-1d9fbf60ee12039", + "parent_pid": 828, + "parent_name": "c:\\windows\\system32\\svchost.exe", + "parent_sha256": "dab2ad1e12aebebceef118504165130e0585faae88d56cfc06b3905bdb18d021", + "parent_md5": "d4461ec74a79986aaab9ef3312c961f4", + "parent_effective_reputation": "LOCAL_WHITE", + "parent_reputation": "NOT_LISTED", + "parent_cmdline": "C:\\WINDOWS\\system32\\svchost.exe -k DcomLaunch -p", + "parent_username": "NT AUTHORITY\\SYSTEM", + "childproc_guid": "", + "childproc_username": "", + "childproc_cmdline": "" + } + }, + { + "count": 10, + "workflow_states": { + "CLOSED": 10 + }, + "determination_values": { + "NONE": 10 + }, + "ml_classification_final_verdicts": { + "NOT_ANOMALOUS": 10 + }, + "first_alert_timestamp": "2023-10-21T15:17:46.070Z", + "last_alert_timestamp": "2023-10-30T15:18:03.361Z", + "highest_severity": 9, + "policy_applied": "NOT_APPLIED", + "threat_notes_present": False, + "tags": [ + "kylie" + ], + "device_count": 1, + "workload_count": 0, + "most_recent_alert": { + "org_key": "ABCD1234", + "alert_url": "defense.conferdeploy.net/alerts?s[c][query_string]=\ + id:887e6bbc-6224-4f36-ad37-084038b7fcab&orgKey=ABC12345", + "id": "ecbc7e05-356f-4cbf-b2fd-fa37f8e67b9a", + "type": "WATCHLIST", + "backend_timestamp": "2023-10-30T15:21:45.395Z", + "user_update_timestamp": None, + "backend_update_timestamp": "2023-10-30T15:21:45.395Z", + "detection_timestamp": "2023-10-30T15:20:05.118Z", + "first_event_timestamp": "2023-10-30T15:18:03.361Z", + "last_event_timestamp": "2023-10-30T15:18:03.361Z", + "severity": 9, + "reason": "Process mftrace.exe was detected by the report \"Defense Evasion - Signed Binary Proxy " + "Execution - mftrace.exe\" in 6 watchlists", + "reason_code": "7103e507-8440-37be-a035-1a50d8773029:5510c3f4-6fe1-314b-bc87-f0ef2ee47734", + "threat_id": "7103E507844087BE20351A50D8773029", + "primary_event_id": "Rc6Y6xqaSbOnrtVfz5cLLA-0", + "policy_applied": "NOT_APPLIED", + "run_state": "RAN", + "sensor_action": "ALLOW", + "workflow": { + "change_timestamp": "2023-10-30T15:21:45.395Z", + "changed_by_type": "SYSTEM", + "changed_by": "AUTO_DISMISSAL", + "closure_reason": "NO_REASON", + "status": "CLOSED" + }, + "determination": { + "change_timestamp": "2023-10-30T15:21:45.395Z", + "value": "NONE", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION" + }, + "tags": [ + "kylie" + ], + "alert_notes_present": False, + "threat_notes_present": False, + "asset_id": None, + "is_updated": False, + "device_id": 18101454, + "device_name": "pscr-test-01-1677257450.4625878-20", + "device_uem_id": "", + "device_target_value": "LOW", + "device_policy": "Pscr SE Testing", + "device_policy_id": 465946, + "device_os": "WINDOWS", + "device_os_version": "Windows 10 x64 SP: 1", + "device_username": "pscr-test-01-1677257450.4625878-20@carbonblack.com", + "device_location": "UNKNOWN", + "device_external_ip": "10.10.10.10", + "mdr_alert": False, + "mdr_alert_notes_present": False, + "mdr_threat_notes_present": False, + "report_id": 
"oJFtoawGS92fVMXlELC1Ow-139cafcc-a365-4bec-8d72-602c35f1e150", + "report_name": "Defense Evasion - Signed Binary Proxy Execution - mftrace.exe", + "report_description": "Binaries signed with trusted digital certificates can execute on Windows " + "systems protected by digital signature validation. Several Microsoft signed " + "binaries that are default on Windows installations can be used to proxy " + "execution of other files.\n\nThreat:\nThis behavior may be abused by adversaries" + " to execute malicious files that could bypass application whitelisting and " + "signature validation on systems.\n\nFalse Positives:\nAs these are techniques " + "that leverage living off the land binaries, False positives may occur in some " + "environments.\n\nScore:\n90", + "report_tags": [ + "attack", + "attackframework", + "threathunting", + "hunting", + "evasion", + "execution", + "t1218", + "lolbin", + "windows", + "mftrace" + ], + "report_link": "https://attack.mitre.org/wiki/Technique/T1218", + "ioc_id": "139cafcc-a365-4bec-8d72-602c35f1e150-0", + "ioc_hit": "((process_name:mftrace.exe)) -enriched:True", + "watchlists": [ + { + "id": "9x0timurQkqP7FBKX4XrUw", + "name": "Carbon Black Advanced Threats" + }, + { + "id": "Cp5DTDiDRcah99nrcIz4Vw", + "name": "My Watchlist 2" + }, + { + "id": "b3l462JEQIK6cECXibgXBg", + "name": "My Watchlist 4" + }, + { + "id": "lJH9nbKbSRKhMtTR6ME35A", + "name": "test Watchlist" + }, + { + "id": "mBP84PY8SyOFJTKzJbmNQ", + "name": "My Watchlist 3" + }, + { + "id": "u9E3dfpJTMaKX0dSBtyIqQ", + "name": "My Watchlist" + } + ], + "process_guid": "ABCD1234-011434ce-0000d2a0-00000000-19db1ded53e8000", + "process_pid": 53920, + "process_name": "mftrace.exe", + "process_sha256": "5b60148e8666a458f804e5d0fe925a9f55cf016733458c58c1980addc44cd774", + "process_md5": "49eb775894a49f13193513b0138f72a9", + "process_effective_reputation": "LOCAL_WHITE", + "process_reputation": "NOT_LISTED", + "process_cmdline": "c:\\program files (x86)\\svchost.exe \\qwer sad olasdjf", + "process_username": "CB INTERNAL\\USER_1", + "process_issuer": [ + "Moravec Code Signing CA - G2" + ], + "process_publisher": [ + "Moravec Test Authority" + ], + "childproc_guid": "", + "childproc_username": "", + "childproc_cmdline": "", + "ml_classification_final_verdict": "NOT_ANOMALOUS", + "ml_classification_global_prevalence": "LOW", + "ml_classification_org_prevalence": "LOW" + } + }, + { + "count": 2, + "workflow_states": { + "OPEN": 2 + }, + "determination_values": { + "NONE": 2 + }, + "ml_classification_final_verdicts": { + "NOT_CLASSIFIED": 2 + }, + "first_alert_timestamp": "2023-10-26T14:26:59.477Z", + "last_alert_timestamp": "2023-10-26T14:26:59.477Z", + "highest_severity": 9, + "policy_applied": "NOT_APPLIED", + "threat_notes_present": False, + "tags": [], + "device_count": 1, + "workload_count": 0, + "most_recent_alert": { + "org_key": "ABCD1234", + "alert_url": "defense.conferdeploy.net/alerts?s[c][query_string]=\ + id:887e6bbc-6224-4f36-ad37-084038b7fcab&orgKey=ABC12345", + "id": "d76b25d2-e103-4522-b48e-30a28fd7f1dc", + "type": "WATCHLIST", + "backend_timestamp": "2023-10-26T14:29:54.345Z", + "user_update_timestamp": None, + "backend_update_timestamp": "2023-10-26T14:29:54.345Z", + "detection_timestamp": "2023-10-26T14:28:05.243Z", + "first_event_timestamp": "2023-10-26T14:26:59.477Z", + "last_event_timestamp": "2023-10-26T14:26:59.477Z", + "severity": 9, + "reason": "Process dismhost.exe was detected by the report \"Persistence - Accessibility Feature " + "Hijacking - Sethc.exe or Utilman.exe\" in 6 
watchlists", + "reason_code": "5495b4de-a32b-35d3-9778-0a5b02338640:bc0cc6c3-f6a9-340b-ad5d-07ff07794d1e", + "threat_id": "5495B4DEA32BC5D3D7780A5B02338640", + "primary_event_id": "PRR3ViutQqmwLXyNmtS-Ew-0", + "policy_applied": "NOT_APPLIED", + "run_state": "RAN", + "sensor_action": "ALLOW", + "workflow": { + "change_timestamp": "2023-10-26T14:29:54.345Z", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION", + "closure_reason": "NO_REASON", + "status": "OPEN" + }, + "determination": { + "change_timestamp": "2023-10-26T14:29:54.345Z", + "value": "NONE", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION" + }, + "tags": None, + "alert_notes_present": False, + "threat_notes_present": False, + "asset_id": None, + "is_updated": False, + "device_id": 17853591, + "device_name": "Win10x64v2004", + "device_uem_id": "", + "device_target_value": "LOW", + "device_policy": "Pscr SE Testing", + "device_policy_id": 465946, + "device_os": "WINDOWS", + "device_os_version": "Windows 10 x64", + "device_location": "UNKNOWN", + "device_external_ip": "10.10.10.10", + "device_internal_ip": "10.10.10.10", + "mdr_alert": False, + "mdr_alert_notes_present": False, + "mdr_threat_notes_present": False, + "report_id": "oJFtoawGS92fVMXlELC1Ow-92de6c37-c143-4201-a0ea-973fca8f0dec", + "report_name": "Persistence - Accessibility Feature Hijacking - Sethc.exe or Utilman.exe", + "report_description": "This query looks for indications of sethc.exe or utilman.exe being replaced. " + "This behavior can be a leading indicator of adversary persistence or privilege " + "escalation.\n\nThreat:\nAdversaries can replace accessibility feature binaries " + "with alternate binaries. This behavior has been publicly observed by both APT3 " + "and APT29.\n\nFalse Positives:\nThese files may be legitimately replaced via " + "system update activity.\n\nScore:\n90", + "report_tags": [ + "backdoor", + "persistence", + "attackframework", + "attack", + "t1546", + "privesc", + "windows" + ], + "report_link": "https://community.carbonblack.com/t5/Threat-Research-Docs/Cb-Response-Advanced-Threats" + "-Threat-Intel-Feed/ta-p/38756", + "ioc_id": "92de6c37-c143-4201-a0ea-973fca8f0dec-0", + "ioc_hit": "(((filemod_name:system32\\\\sethc.exe OR filemod_name:SysArm32\\\\sethc.exe OR filemod_" + "name:system32\\\\utilman.exe OR filemod_name:SysArm32\\\\utilman.exe) -(process_name:" + "windows\\\\system32\\\\poqexec.exe OR process_name:windows\\\\system32\\\\wbengine.exe OR " + "process_name:sources\\\\setuphost.exe OR parent_name:wuauclt.exe OR process_name:" + "windows\\\\system32\\\\dism.exe OR process_name:windows\\\\SysArm32\\\\dism.exe OR process_" + "name:windows\\\\ccmcache\\\\* OR process_name:sources\\\\setupprep.exe OR process_name:" + "sources\\\\setupplatform.exe OR process_name:windows\\\\servicing\\\\trustedinstaller.exe " + "OR process_name:windows\\\\system32\\\\taskhostw.exe OR process_name:windows\\\\system32" + "\\\\cleanmgr.exe OR process_name:windows\\\\SysArm32\\\\cleanmgr.exe OR process_name:" + "windows\\\\softwaredistribution\\\\download\\\\*\\\\windowsupdatebox.exe OR process_cmdline" + ":\"localsystemnetworkrestricted\\ \\-p\\ \\-s\\ hvsics\"))) -enriched:True", + "watchlists": [ + { + "id": "9x0timurQkqP7FBKX4XrUw", + "name": "Carbon Black Advanced Threats" + }, + { + "id": "Cp5DTDiDRcah99nrcIz4Vw", + "name": "My Watchlist 2" + }, + { + "id": "b3l462JEQIK6cECXibgXBg", + "name": "My Watchlist 4" + }, + { + "id": "lJH9nbKbSRKhMtTR6ME35A", + "name": "test Watchlist" + }, + { + "id": "mBP84PY8SyOFJTKzJbmNQ", 
+ "name": "My Watchlist 3" + }, + { + "id": "u9E3dfpJTMaKX0dSBtyIqQ", + "name": "My Watchlist" + } + ], + "process_guid": "ABCD1234-01106c97-000011e0-00000000-1da080d2ad07e3c", + "process_pid": 4576, + "process_name": "c:\\$windows.~bt\\work\\8952c707-3efc-4f94-bc75-6973c12d1042\\dismhost.exe", + "process_sha256": "21baef2bb5ab2df3aa4d95c8333aadadda61dee65e61ad2dbe5f3dbaddb163c7", + "process_md5": "80e6c06c378bc7c382c23b1d643cd7d2", + "process_effective_reputation": "TRUSTED_WHITE_LIST", + "process_reputation": "ADAPTIVE_WHITE_LIST", + "process_cmdline": "C:\\$WINDOWS.~BT\\Work\\8952C707-3EFC-4F94-BC75-6973C12D1042\\dismhost.exe " + "{7F60B69D-4182-422C-8D9E-C9EFF8C25564}", + "process_username": "NT AUTHORITY\\SYSTEM", + "process_issuer": [ + "Microsoft Windows Production PCA 2011" + ], + "process_publisher": [ + "Microsoft Windows" + ], + "parent_guid": "ABCD1234-01106c97-0000120c-00000000-1da08070d71d31d", + "parent_pid": 4620, + "parent_name": "c:\\$windows.~bt\\sources\\setuphost.exe", + "parent_sha256": "c3cda09375ef70d98778eeb60b57063e9bee9c6d339bfe9c78a109505fb0aef5", + "parent_md5": "328c3c5398356a671cf7ccc2d63dbd31", + "parent_effective_reputation": "ADAPTIVE_WHITE_LIST", + "parent_reputation": "NOT_LISTED", + "parent_cmdline": "\"C:\\$WINDOWS.~BT\\Sources\\SetupHost.Exe\" /Install /Package /Quiet /ReportId " + "2A023074-E74A-493D-86C9-BE98C74B5658.1 /FlightData \"RS:18AB8\" \"/CancelId\" \"" + "C-0f1b3704-a1e1-4ee9-95bd-0acfc322d310\" \"/PauseId\" \"P-0f1b3704-a1e1-4ee9-95bd-" + "0acfc322d310\" \"/CorrelationVector\" \"4LUJMlgxQkm8FZbm.46.2.2.115\" \"/" + "ActionListFile\" \"C:\\WINDOWS\\SoftwareDistribution\\Download\\" + "23df863fededce875f9108f92ea08646\\ActionList.xml\" ", + "parent_username": "NT AUTHORITY\\SYSTEM", + "childproc_guid": "", + "childproc_username": "", + "childproc_cmdline": "", + "ml_classification_final_verdict": "NOT_CLASSIFIED", + "ml_classification_global_prevalence": "HIGH", + "ml_classification_org_prevalence": "HIGH" + } + }, + { + "count": 2, + "workflow_states": { + "OPEN": 2 + }, + "determination_values": { + "NONE": 2 + }, + "ml_classification_final_verdicts": {}, + "first_alert_timestamp": "2023-10-11T19:39:46.639Z", + "last_alert_timestamp": "2023-10-11T19:46:23.393Z", + "highest_severity": 4, + "policy_applied": "NOT_APPLIED", + "threat_notes_present": False, + "tags": [], + "device_count": 2, + "workload_count": 0, + "most_recent_alert": { + "org_key": "ABCD1234", + "alert_url": "defense.conferdeploy.net/alerts?s[c][query_string]=\ + id:887e6bbc-6224-4f36-ad37-084038b7fcab&orgKey=ABC12345", + "id": "28f2228c-48e2-4ce7-a3e9-8a355dedbb6a", + "type": "WATCHLIST", + "backend_timestamp": "2023-10-25T11:24:28.759Z", + "user_update_timestamp": "2023-10-25T11:24:29.739Z", + "backend_update_timestamp": "2023-10-25T11:24:28.759Z", + "detection_timestamp": "2023-10-25T11:21:49.224Z", + "first_event_timestamp": "2023-10-11T19:46:23.393Z", + "last_event_timestamp": "2023-10-11T19:46:23.393Z", + "severity": 4, + "reason": "Process nltest.exe was detected by the report \"Discovery - NLTest Domain Trust Enumeration" + "\" in watchlist \"Managed Detection and Response Intelligence\"", + "reason_code": "7177ff6d-0968-3481-953e-773f9eaf11af:7e088273-7d5f-3e8d-87ca-b026c15ad163", + "threat_id": "7177FF6D0968F481553E773F9EAF11AF", + "primary_event_id": "vvAsddw1QYK-RdQMV0_v_w-0", + "policy_applied": "NOT_APPLIED", + "run_state": "RAN", + "sensor_action": "ALLOW", + "workflow": { + "change_timestamp": "2023-10-25T11:24:28.759Z", + "changed_by_type": "SYSTEM", + 
"changed_by": "ALERT_CREATION", + "closure_reason": "NO_REASON", + "status": "OPEN" + }, + "determination": { + "change_timestamp": "2023-10-25T11:24:28.759Z", + "value": "NONE", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION" + }, + "tags": None, + "alert_notes_present": False, + "threat_notes_present": False, + "asset_id": None, + "is_updated": False, + "device_id": 19013608, + "device_name": "3dot5-do-not-upgrade", + "device_uem_id": "", + "device_target_value": "LOW", + "device_policy": "pscr se testing", + "device_policy_id": 465946, + "device_os": "WINDOWS", + "device_os_version": "Windows 10 x64", + "device_location": "UNKNOWN", + "device_external_ip": "10.10.10.10", + "device_internal_ip": "10.10.10.10", + "mdr_alert": True, + "mdr_workflow": { + "change_timestamp": "2023-10-25T11:24:29.739Z", + "status": "TRIAGE_COMPLETE", + "is_assigned": True + }, + "mdr_determination": { + "change_timestamp": "2023-10-25T11:24:29.739Z", + "value": "LIKELY_THREAT" + }, + "mdr_alert_notes_present": False, + "mdr_threat_notes_present": False, + "report_id": "Hf02hPgRSODd1tiEbUnw-1AA42B3F-B323-41A3-B924-31EA00C9C2CF", + "report_name": "Discovery - NLTest Domain Trust Enumeration", + "report_description": "Attackers may leverage the nltest command to discover domain trusts. This " + "technique requires the installation of nltest via Windows RSAT or the Windows " + "Server AD DS role.", + "report_tags": [ + "nltest", + "discovery", + "t1482", + "attackframework", + "attack", + "windows" + ], + "report_link": "https://attack.mitre.org/techniques/T1482/", + "ioc_id": "1AA42B3F-B323-41A3-B924-31EA00C9C2CF", + "ioc_hit": "(process_name:nltest.exe....", + "watchlists": [ + { + "id": "5A93z6EISzSY8M8AUhzBjg", + "name": "Managed Detection and Response Intelligence" + } + ], + "threat_hunt_id": "845cac53-01ff-4e11-9c8d-a2eb5c1ac048", + "threat_hunt_name": "test preview", + "process_guid": "ABCD1234-01221fe8-00002074-00000000-1d9fc7b9a83015b", + "process_pid": 8308, + "process_name": "c:\\windows\\system32\\nltest.exe", + "process_sha256": "50742fc1c1af7bfb5a58af2c7d19a0d552a9c4493b1b972139f56927c25197aa", + "process_md5": "ebbc96ce1a4e2365822bb13b88950ee1", + "process_effective_reputation": "NOT_LISTED", + "process_reputation": "NOT_LISTED", + "process_cmdline": "nltest.exe /dclist:%userdnsdomain%", + "process_username": "3DOT5-DO-NOT-UP\\bit9qa", + "process_issuer": [ + "" + ], + "process_publisher": [ + "" + ], + "parent_guid": "ABCD1234-01221fe8-0000213c-00000000-1d9fc7b9a727a6d", + "parent_pid": 8508, + "parent_name": "c:\\windows\\system32\\cmd.exe", + "parent_sha256": "8258756c2e0ca794af527258e8a3a4f7431fbd7df44403603b94cb2a70cb1bdf", + "parent_md5": "00837ec16fd4063b27d4327b5ae85657", + "parent_effective_reputation": "ADAPTIVE_WHITE_LIST", + "parent_reputation": "NOT_LISTED", + "parent_cmdline": "\"cmd.exe\" /c \"nltest.exe /dclist:%userdnsdomain%\"", + "parent_username": "3DOT5-DO-NOT-UP\\bit9qa", + "childproc_guid": "", + "childproc_username": "", + "childproc_cmdline": "" + } + } + ], + "group_by_total_count": 2335 +} + +MOST_RECENT_ALERT = { + "org_key": "ABCD1234", + "alert_url": "defense.conferdeploy.net/alerts?s[c][query_string]=\ + id:887e6bbc-6224-4f36-ad37-084038b7fcab&orgKey=ABC12345", + "id": "d6f05ce5-23ad-4cf0-a4d8-ee564396b2d1", + "type": "WATCHLIST", + "backend_timestamp": "2023-10-30T17:32:02.851Z", + "user_update_timestamp": None, + "backend_update_timestamp": "2023-10-30T17:32:02.851Z", + "detection_timestamp": "2023-10-30T17:30:04.587Z", + "first_event_timestamp": 
"2023-10-30T17:19:22.943Z", + "last_event_timestamp": "2023-10-30T17:19:22.943Z", + "severity": 1, + "reason": "Process cmd.exe was detected by the report \"scale and performance test report\" in " + "watchlist \"perf_automation_feed_qrcialhx\"", + "reason_code": "5319dc28-4f8b-3a9f-84c6-e045c5e186ff:920e0ded-e95b-3d77-8eaa-e28bdaa133f2", + "threat_id": "5319DC284F8B2A9FC4C6E045C5E186FF", + "primary_event_id": "o6NwqERTQf6eYlD0kvpRLw-0", + "policy_applied": "NOT_APPLIED", + "run_state": "RAN", + "sensor_action": "ALLOW", + "workflow": { + "change_timestamp": "2023-10-30T17:32:02.851Z", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION", + "closure_reason": "NO_REASON", + "status": "OPEN" + }, + "determination": { + "change_timestamp": "2023-10-30T17:32:02.851Z", + "value": "NONE", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION" + }, + "tags": None, + "alert_notes_present": False, + "threat_notes_present": False, + "asset_id": None, + "is_updated": False, + "device_id": 18118176, + "device_name": "pscr-test-01-1677785033.788122-22", + "device_uem_id": "", + "device_target_value": "LOW", + "device_policy": "Pscr SE Testing", + "device_policy_id": 465946, + "device_os": "WINDOWS", + "device_os_version": "Windows 10 x64 SP: 1", + "device_username": "pscr-test-01-1677785033.788122-22@carbonblack.com", + "device_location": "UNKNOWN", + "device_external_ip": "10.10.10.10", + "mdr_alert": False, + "mdr_alert_notes_present": False, + "mdr_threat_notes_present": False, + "report_id": "vnbrUmClRh2Mh8398QtJww-scale_perf_automation_report01_qrcialhx", + "report_name": "scale and performance test report", + "report_description": "scale and performance test description", + "report_tags": [], + "ioc_id": "scale_perf_automation_report01_ioc01_qrcialhx", + "ioc_hit": "process_name:cmd.exe", + "watchlists": [ + { + "id": "gSpaq0J9QB1qRY3lEdAw", + "name": "perf_automation_feed_qrcialhx" + } + ], + "process_guid": "ABCD1234-01147620-00780012-00000000-19db1ded53e8000", + "process_pid": 7864338, + "process_name": "cmd.exe", + "process_sha256": "bb5743ff9ce542b7018d712597b2f3e2868e89feaf8d76253324644fbeda1899", + "process_md5": "0a56e038d66da45947f8fdf130aef2d5", + "process_effective_reputation": "LOCAL_WHITE", + "process_reputation": "NOT_LISTED", + "process_cmdline": "cmd.exe /c InfDefaultInstall.exe C:\\Users\\bit9qa\\AtomicRedTeam\\" + "atomic-red-team-vmware-develop\\atomics\\T1218\\src\\Infdefaultinstall.inf", + "process_username": "NT AUTHORITY\\SYSTEM", + "process_issuer": [ + "Moravec Code Signing CA - G2" + ], + "process_publisher": [ + "Moravec Test Authority" + ], + "childproc_guid": "", + "childproc_username": "", + "childproc_cmdline": "" +} + +GROUP_SEARCH_ALERT_REQUEST_OVERRIDE_GROUPBY = { + "group_by": { + "field": "NOT_THREAT_ID" + }, + "time_range": { + "range": "-10d" + }, + "criteria": { + "type": [ + "WATCHLIST" + ], + "minimum_severity": 1 + }, + "rows": 1, + "sort": [ + { + "field": "count", + "order": "DESC" + } + ] +} + + +ALERT_SEARCH_RESPONSE = { + "results": [ + { + "org_key": "ABC12345", + "alert_url": "test.io/alerts?s[c][query_string]=id:14b3238e-cff8-49bf-a1c0-d0c6587d41e4&orgKey=EWRTY2PK", + "id": "14b3238e-cff8-49bf-a1c0-d0c6587d41e4", + "type": "WATCHLIST", + "backend_timestamp": "2023-12-01T14:28:24.337Z", + "user_update_timestamp": None, + "backend_update_timestamp": "2023-12-01T14:28:24.337Z", + "detection_timestamp": "2023-12-01T14:25:18.539Z", + "first_event_timestamp": "2023-12-01T14:19:44.392Z", + "last_event_timestamp": 
"2023-12-01T14:19:44.392Z", + "severity": 1, + "reason": "Process cmd.exe was detected by the report \"scale and performance test report\" in watchlist " + "\"perf_automation_feed_qrcialhx\"", + "reason_code": "5319dc28-4f8b-3a9f-84c6-e045c5e186ff:920e0ded-e95b-3d77-8eaa-e28bdaa133f2", + "threat_id": "5319DC284F8B2A9FC4C6E045C5E186FF", + "primary_event_id": "XWBgs6G8TOuqc4NzjvWHDg-0", + "policy_applied": "NOT_APPLIED", + "run_state": "RAN", + "sensor_action": "ALLOW", + "workflow": { + "change_timestamp": "2023-12-01T14:28:24.337Z", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION", + "closure_reason": "NO_REASON", + "status": "OPEN" + }, + "determination": { + "change_timestamp": "2023-12-01T14:28:24.337Z", + "value": "NONE", + "changed_by_type": "SYSTEM", + "changed_by": "ALERT_CREATION" + }, + "tags": None, + "alert_notes_present": False, + "threat_notes_present": False, + "asset_id": None, + "is_updated": False, + "device_id": 18118170, + "device_name": "test", + "device_uem_id": "", + "device_target_value": "LOW", + "device_policy": "Pscr SE Testing", + "device_policy_id": 465946, + "device_os": "WINDOWS", + "device_os_version": "Windows 10 x64 SP: 1", + "device_username": "test@carbonblack.com", + "device_location": "UNKNOWN", + "device_external_ip": "10.10.10.10", + "mdr_alert": False, + "mdr_alert_notes_present": False, + "mdr_threat_notes_present": False, + "report_id": "vnbrUmClRh2Mh8398QtJww-scale_perf_automation_report01_qrcialhx", + "report_name": "scale and performance test report", + "report_description": "scale and performance test description", + "report_tags": [], + "ioc_id": "scale_perf_automation_report01_ioc01_qrcialhx", + "ioc_hit": "process_name:cmd.exe", + "watchlists": [ + { + "id": "gSpaq0J9QB1qRY3lEdAw", + "name": "perf_automation_feed_qrcialhx" + } + ], + "process_guid": "EWRTY2PK-0114761a-009367dc-00000000-19db1ded53e8000", + "process_pid": 9660380, + "process_name": "cmd.exe", + "process_sha256": "b1f11107d63211d73c04020c7390e2b3070750d45ac89ccbb06450ae6dcadd2f", + "process_md5": "40c8804dd11a4e54121172fe891c2e9a", + "process_effective_reputation": "LOCAL_WHITE", + "process_reputation": "NOT_LISTED", + "process_cmdline": "cmd.exe /c InfDefaultInstall.exe C:\\Users\\bit9qa\\Infdefaultinstall.inf", + "process_username": "NT AUTHORITY\\SYSTEM", + "process_issuer": [ + "Moravec Code Signing CA - G2" + ], + "process_publisher": [ + "Moravec Test Authority" + ], + "childproc_guid": "", + "childproc_username": "", + "childproc_cmdline": "" + } + ], + "num_found": 1236, + "num_available": 1236 +} + +GROUPED_ALERT_FACET_REQUEST = { + "group_by": { + "field": "THREAT_ID" + }, + "terms": { + "fields": [ + "type", + "THREAT_ID" + ], + "rows": 0 + }, + "criteria": { + "minimum_severity": 3 + }, + "exclusions": { + "type": [ + "HOST_BASED_FIREWALL", + "CONTAINER_RUNTIME" + ] + }, + "filter_values": True +} + +GROUPED_ALERT_FACET_RESPONSE = { + "results": [ + { + "field": "threat_id", + "values": [ + { + "total": 1, + "id": "0f8d8b5eb2ccc09ad3d2c01c6b10af7e4279f58202e49cfad93d8fab7581d294", + "name": "0f8d8b5eb2ccc09ad3d2c01c6b10af7e4279f58202e49cfad93d8fab7581d294" + }, + { + "total": 1, + "id": "13C37200E1CE8F8F7DBE4C7647291BCB", + "name": "13C37200E1CE8F8F7DBE4C7647291BCB" + }, + { + "total": 1, + "id": "1B32B7CF7C3D40F117B46EC2E39530C9", + "name": "1B32B7CF7C3D40F117B46EC2E39530C9" + }, + { + "total": 1, + "id": "1ce583a1df38f9020253fbf6092f82fa", + "name": "1ce583a1df38f9020253fbf6092f82fa" + }, + { + "total": 1, + "id": "2ECAD3461EBF6E7E12F4C4DCB013667D", 
+ "name": "2ECAD3461EBF6E7E12F4C4DCB013667D" + }, + { + "total": 1, + "id": "30CD659F716EB1174FAF3FD71438A04B", + "name": "30CD659F716EB1174FAF3FD71438A04B" + }, + { + "total": 1, + "id": "379dd07932c4bb76514e822056941023", + "name": "379dd07932c4bb76514e822056941023" + }, + { + "total": 1, + "id": "3a99805c53d208b55d1de91f385018b01a1861069e8a11c7d28b9b8e008ca47a", + "name": "3a99805c53d208b55d1de91f385018b01a1861069e8a11c7d28b9b8e008ca47a" + }, + { + "total": 1, + "id": "45DC740C4FA77899B555E08B99F539B1", + "name": "45DC740C4FA77899B555E08B99F539B1" + }, + { + "total": 1, + "id": "5098E61E1E31B6E95C9C1257A465B669", + "name": "5098E61E1E31B6E95C9C1257A465B669" + } + ] + }, + { + "field": "type", + "values": [ + { + "total": 13, + "id": "WATCHLIST", + "name": "WATCHLIST" + }, + { + "total": 13, + "id": "CB_ANALYTICS", + "name": "CB_ANALYTICS" + } + ] + } + ] +} diff --git a/src/tests/unit/fixtures/platform/mock_asset_groups.py b/src/tests/unit/fixtures/platform/mock_asset_groups.py new file mode 100644 index 000000000..aa142e020 --- /dev/null +++ b/src/tests/unit/fixtures/platform/mock_asset_groups.py @@ -0,0 +1,316 @@ +"""Mock responses for asset groups""" + +CREATE_AG_REQUEST = { + "description": "Group Test Description", + "member_type": "DEVICE", + "name": "Group Test", + "policy_id": 7113785, + "query": "os_version:Windows" +} + +CREATE_AG_RESPONSE = { + "id": "4b48a403-e371-4e3d-ae6c-8eb9080fe7ad", + "name": "Group Test", + "description": "Group Test Description", + "org_key": "test", + "status": "OK", + "member_type": "DEVICE", + "discovered": False, + "create_time": "2022-11-09T06:27:30.734Z", + "update_time": "2022-11-09T06:27:30.734Z", + "query": "os_version:Windows", + "member_count": 0, + "policy_id": 7113785, + "policy_name": "Monitored" +} + +EXISTING_AG_DATA = { + "id": "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + "name": "Existing Group", + "description": "Some Description", + "org_key": "test", + "status": "OK", + "member_type": "DEVICE", + "discovered": False, + "create_time": "2022-11-09T06:27:30.734Z", + "update_time": "2022-11-09T06:27:30.734Z", + "query": None, + "member_count": 0, + "policy_id": 8675309, + "policy_name": "Jenny" +} + +EXISTING_AG_DATA_2 = { + "id": "509f437f-6b9a-4b8e-996e-9183b35f9069", + "name": "Another Group", + "description": "Some new description", + "org_key": "test", + "status": "OK", + "member_type": "DEVICE", + "discovered": False, + "create_time": "2022-11-09T06:27:30.734Z", + "update_time": "2022-11-09T06:27:30.734Z", + "query": None, + "member_count": 0, + "policy_id": 5555555, + "policy_name": "MrsQueen" +} + +EXISTING_AG_DATA_3 = { + "id": "16b0dd95-85a3-4f73-bcf4-9b666436c534", + "name": "BaronHarkonnen", + "description": "Bring in Feyd and Rabban", + "org_key": "test", + "status": "OK", + "member_type": "DEVICE", + "discovered": False, + "create_time": "2023-11-22T19:34:34.685Z", + "update_time": "2023-11-22T19:34:34.812Z", + "query": "os.equals:WINDOWS", + "member_count": 4, + "policy_id": 10191, + "policy_name": "BIFAR" +} + +UPDATE_AG_REQUEST = { + "id": "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + "name": "Renamed Group", + "description": "Change This Too", + "org_key": "test", + "status": "OK", + "member_type": "DEVICE", + "discovered": False, + "create_time": "2022-11-09T06:27:30.734Z", + "update_time": "2022-11-09T06:27:30.734Z", + "query": None, + "member_count": 0, + "policy_id": 9001, + "policy_name": "Jenny" +} + +QUERY_REQUEST = { + "query": "test", + "criteria": { + "policy_id": [ + 7113785 + ], + "name": [ + "Group Test" + ], 
+ "discovered": [ + False + ], + "group_id": [ + "9b8b8d84-4a44-4a94-81ec-1f8ef52d4430" + ] + }, + "rows": 42, + "sort": [ + { + "field": "name", + "order": "ASC" + } + ], + "start": 0 +} + +QUERY_REQUEST_DEFAULT = { + "rows": 100, + "start": 0 +} + +QUERY_RESPONSE = { + "num_found": 1, + "results": [ + { + "id": "9b8b8d84-4a44-4a94-81ec-1f8ef52d4430", + "name": "Group Test", + "description": "Group Test", + "org_key": "test", + "status": "OK", + "member_type": "DEVICE", + "discovered": False, + "create_time": "2022-09-05T13:12:31.848Z", + "update_time": "2022-09-05T13:12:31.848Z", + "query": None, + "member_count": 0, + "policy_id": 7113785, + "policy_name": "Monitored" + } + ] +} + +LIST_MEMBERS_RESPONSE1 = { + "num_found": 3, + "member_ids": [ + "12345678", + "66760099", + "42691014" + ], + "members": [ + { + "external_member_id": "12345678", + "dynamic": True, + "manual": False + }, + { + "external_member_id": "66760099", + "dynamic": False, + "manual": True + }, + { + "external_member_id": "42691014", + "dynamic": True, + "manual": False + } + ] +} + +LIST_MEMBERS_OUTPUT1 = [ + { + "external_member_id": 12345678, + "dynamic": True, + "manual": False + }, + { + "external_member_id": 66760099, + "dynamic": False, + "manual": True + }, + { + "external_member_id": 42691014, + "dynamic": True, + "manual": False + } +] + +LIST_MEMBERS_RESPONSE2 = { + "num_found": 1, + "member_ids": [ + "98765" + ], + "members": [ + { + "external_member_id": "98765", + "dynamic": False, + "manual": True + } + ] +} + +GET_ALL_RESPONSE = { + "num_found": 2, + "results": [ + { + "id": "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + "name": "Existing Group", + "description": "Some Description", + "org_key": "test", + "status": "OK", + "member_type": "DEVICE", + "discovered": False, + "create_time": "2022-11-09T06:27:30.734Z", + "update_time": "2022-11-09T06:27:30.734Z", + "member_count": 0, + }, + { + "id": "509f437f-6b9a-4b8e-996e-9183b35f9069", + "name": "Another Group", + "description": "Some new description", + "org_key": "test", + "status": "OK", + "member_type": "DEVICE", + "discovered": False, + "create_time": "2022-11-09T06:27:30.734Z", + "update_time": "2022-11-09T06:27:30.734Z", + "member_count": 0, + } + ] +} + +GET_STATS_RESPONSE = { + "intersections": [ + { + "count": 2, + "ids": [ + "12345678", + "66760099" + ], + "group_id": "509f437f-6b9a-4b8e-996e-9183b35f9069", + "group_name": "Another Group", + "group_description": "Some new description" + }, + { + "count": 1, + "ids": [ + "66760099" + ], + "group_id": "8e0e3714-fece-4c76-9728-6ad2713cde72", + "group_name": "Secure Access Group", + "group_description": "More secure than usual" + }, + ], + "unassigned_properties": [ + { + "type": "POLICY", + "count": 0, + "ids": [] + } + ] +} + +PREVIEW_DELETE_REQUEST = { + "action": "ASSET_GROUPS_DELETE", + "asset_group_ids": ["db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", "149cea01-2a13-4a0a-8ca9-cdf359a6378e"] +} + +PREVIEW_NULL_RESPONSE = { + "preview": [] +} + +PREVIEW_ADD_MEMBERS_REQUEST = { + "action": "ADD_MEMBERS", + "asset_ids": [123, 456], + "asset_group_ids": ["db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16"] +} + +PREVIEW_REMOVE_MEMBERS_REQUEST = { + "action": "REMOVE_MEMBERS", + "asset_ids": [123, 456], + "asset_group_ids": ["db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16"] +} + +PREVIEW_UPDATE_REQUEST_1 = { + "action": "ASSET_GROUPS_UPDATE", + "asset_group_ids": ["16b0dd95-85a3-4f73-bcf4-9b666436c534"], + "policy_id": 32768 +} + +PREVIEW_UPDATE_REQUEST_2 = { + "action": "ASSET_GROUPS_UPDATE", + "asset_group_ids": 
["16b0dd95-85a3-4f73-bcf4-9b666436c534"], + "policy_id": None +} + +PREVIEW_UPDATE_REQUEST_3 = { + "action": "ASSET_GROUPS_UPDATE", + "asset_group_ids": ["16b0dd95-85a3-4f73-bcf4-9b666436c534"], + "asset_group_query": "os: WINDOWS OR MACOS" +} + +PREVIEW_UPDATE_REQUEST_4 = { + "action": "ASSET_GROUPS_UPDATE", + "asset_group_ids": ["16b0dd95-85a3-4f73-bcf4-9b666436c534"], + "asset_group_query": None +} + +PREVIEW_DELETE_REQUEST_2 = { + "action": "ASSET_GROUPS_DELETE", + "asset_group_ids": ["db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16"] +} + +PREVIEW_CREATE_REQUEST = { + "action": "ASSET_GROUPS_CREATE", + "asset_group_query": "os.equals:WINDOWS", + "policy_id": 123456 +} diff --git a/src/tests/unit/fixtures/platform/mock_devices.py b/src/tests/unit/fixtures/platform/mock_devices.py index 1413af8c6..43ce1fe5c 100644 --- a/src/tests/unit/fixtures/platform/mock_devices.py +++ b/src/tests/unit/fixtures/platform/mock_devices.py @@ -6,6 +6,18 @@ "ad_group_id": 0, "appliance_name": None, "appliance_uuid": None, + "asset_group": [ + { + "id": "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + "name": "Existing Group", + "membership_type": "MANUAL" + }, + { + "id": "509f437f-6b9a-4b8e-996e-9183b35f9069", + "name": "Another Group", + "membership_type": "DYNAMIC" + } + ], "av_ave_version": "8.3.62.44", "av_engine": "4.13.0.207-ave.8.3.62.44:avpack.8.5.0.66:vdf.8.18.9.10 (20200826)", "av_last_scan_time": None, @@ -92,6 +104,18 @@ "ad_group_id": 0, "appliance_name": None, "appliance_uuid": None, + "asset_group": [ + { + "id": "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + "name": "Existing Group", + "membership_type": "MANUAL" + }, + { + "id": "509f437f-6b9a-4b8e-996e-9183b35f9069", + "name": "Another Group", + "membership_type": "DYNAMIC" + } + ], "av_ave_version": "8.3.62.44", "av_engine": "4.13.0.207-ave.8.3.62.44:avpack.8.5.0.66:vdf.8.18.9.10 (20200826)", "av_last_scan_time": None, @@ -180,6 +204,18 @@ "ad_group_id": 0, "appliance_name": None, "appliance_uuid": None, + "asset_group": [ + { + "id": "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + "name": "Existing Group", + "membership_type": "MANUAL" + }, + { + "id": "509f437f-6b9a-4b8e-996e-9183b35f9069", + "name": "Another Group", + "membership_type": "DYNAMIC" + } + ], "av_ave_version": "8.3.62.44", "av_engine": "4.13.0.207-ave.8.3.62.44:avpack.8.5.0.66:vdf.8.18.9.10 (20200826)", "av_last_scan_time": None, @@ -410,3 +446,174 @@ } ] } + +ASSET_GROUPS_RESPONSE_1 = { + "98765": [ + "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + "509f437f-6b9a-4b8e-996e-9183b35f9069" + ], + "3031": [ + "509f437f-6b9a-4b8e-996e-9183b35f9069", + "91366048-04dd-4034-baf0-b768128fe433", + "4f0a24f8-002b-4fe7-aaa6-6844bae2639e" + ], + "1777": [ + "509f437f-6b9a-4b8e-996e-9183b35f9069", + "297b9b31-3737-4831-9dd1-cf47770df3e5" + ] +} + +ASSET_GROUPS_OUTPUT_1 = { + 98765: [ + "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + "509f437f-6b9a-4b8e-996e-9183b35f9069" + ], + 3031: [ + "509f437f-6b9a-4b8e-996e-9183b35f9069", + "91366048-04dd-4034-baf0-b768128fe433", + "4f0a24f8-002b-4fe7-aaa6-6844bae2639e" + ], + 1777: [ + "509f437f-6b9a-4b8e-996e-9183b35f9069", + "297b9b31-3737-4831-9dd1-cf47770df3e5" + ] +} + +ASSET_GROUPS_RESPONSE_2 = { + "98765": [ + "509f437f-6b9a-4b8e-996e-9183b35f9069" + ], + "3031": [ + "509f437f-6b9a-4b8e-996e-9183b35f9069", + ], + "1777": [ + "509f437f-6b9a-4b8e-996e-9183b35f9069", + ] +} + +ASSET_GROUPS_OUTPUT_2 = { + 98765: [ + "509f437f-6b9a-4b8e-996e-9183b35f9069" + ], + 3031: [ + "509f437f-6b9a-4b8e-996e-9183b35f9069", + ], + 1777: [ + "509f437f-6b9a-4b8e-996e-9183b35f9069", + ] +} + 
+ASSET_GROUPS_RESPONSE_3 = { + "98765": [ + "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + ], + "3031": [ + "91366048-04dd-4034-baf0-b768128fe433", + "4f0a24f8-002b-4fe7-aaa6-6844bae2639e" + ], + "1777": [ + "297b9b31-3737-4831-9dd1-cf47770df3e5" + ] +} + +ASSET_GROUPS_OUTPUT_3 = { + 98765: [ + "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + ], + 3031: [ + "91366048-04dd-4034-baf0-b768128fe433", + "4f0a24f8-002b-4fe7-aaa6-6844bae2639e" + ], + 1777: [ + "297b9b31-3737-4831-9dd1-cf47770df3e5" + ] +} + +ASSET_GROUPS_RESPONSE_SINGLE = { + "98765": [ + "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + "509f437f-6b9a-4b8e-996e-9183b35f9069" + ] +} + +ASSET_GROUPS_OUTPUT_SINGLE = { + 98765: [ + "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", + "509f437f-6b9a-4b8e-996e-9183b35f9069" + ] +} + +ADD_POLICY_OVERRIDE_REQUEST = { + "action": "ADD_POLICY_OVERRIDE", + "asset_ids": [98765], + "policy_id": 1011 +} + +ADD_POLICY_OVERRIDE_RESPONSE = { + "preview": [ + { + "current_policy": { + "id": 11200, + "position": 2 + }, + "new_policy": { + "id": 1011, + "position": 1 + }, + "asset_count": 1, + "asset_query": "(device_id: 98765)", + "assets_search_definition": { + "query": "(device_id: 98765)" + } + } + ] +} + +REMOVE_POLICY_OVERRIDE_REQUEST = { + "action": "REMOVE_POLICY_OVERRIDE", + "asset_ids": [98765] +} + +REMOVE_POLICY_OVERRIDE_RESPONSE = { + "preview": [ + { + "current_policy": { + "id": 11200, + "position": 2 + }, + "new_policy": { + "id": 14760, + "position": 5 + }, + "asset_count": 1, + "asset_query": "(device_id: 98765)", + "assets_search_definition": { + "query": "(device_id: 98765)" + } + } + ] +} + + +def GET_SCROLL_DEVICES(rows, num_found, num_remaining): + """Generate results response based on num_remaining""" + return { + "org_key": "test", + "num_found": num_found, + "search_after": "MTcwMjMyMTM2MDU3OSwyMT" if num_remaining > 0 else "", + "results": [GET_DEVICE_RESP for _ in range(rows)] + } + + +EXPORT_JOB_REDIRECT = { + "id": 11608915, + "type": "ENDPOINTS", + "job_parameters": { + "job_parameters": None + }, + "connector_id": "ABCD1234", + "org_key": "test", + "status": "IN_PROGRESS", + "create_time": "2024-01-26T18:18:39.962319Z", + "last_update_time": "2024-01-26T18:18:40.140353Z" +} diff --git a/src/tests/unit/fixtures/platform/mock_observations.py b/src/tests/unit/fixtures/platform/mock_observations.py index 8dfa525c2..12f99d0ce 100644 --- a/src/tests/unit/fixtures/platform/mock_observations.py +++ b/src/tests/unit/fixtures/platform/mock_observations.py @@ -302,6 +302,112 @@ } +GET_OBSERVATIONS_DETAIL_JOB_RESULTS_FOR_DEOBFUSCATE = { + "approximate_unaggregated": 2, + "completed": 4, + "contacted": 4, + "num_aggregated": 1, + "num_available": 1, + "num_found": 1, + "results": [ + { + "alert_category": ["OBSERVED"], + "alert_id": None, + "backend_timestamp": "2023-02-08T03:22:21.570Z", + "device_external_ip": "127.0.0.1", + "device_group_id": 0, + "device_id": 17482451, + "device_installed_by": "bit9qa", + "device_internal_ip": "127.0.0.1", + "device_location": "ONSITE", + "device_name": "dev01-39x-1", + "device_os": "WINDOWS", + "device_os_version": "Windows 10 x64", + "device_policy": "lonergan policy", + "device_policy_id": 12345, + "device_target_priority": "MEDIUM", + "device_timestamp": "2023-02-08T03:20:33.751Z", + "document_guid": "KBrOYUNlTYe116ADgNvGw", + "enriched": True, + "enriched_event_type": "NETWORK", + "event_description": "The script...", + "event_id": "8fbccc2da75f11ed937ae3cb089984c6", + "event_network_inbound": False, + "event_network_local_ipv4": "127.0.0.1", + 
"event_network_location": "Santa Clara,CA,United States", + "event_network_protocol": "TCP", + "event_network_remote_ipv4": "127.0.0.1", + "event_network_remote_port": 80, + "event_report_code": "SUB_RPT_NONE", + "event_threat_score": [3], + "event_type": "netconn", + "ingress_time": 1675826462036, + "legacy": True, + "netconn_actions": ["ACTION_CONNECTION_ESTABLISHED"], + "netconn_domain": "a1887..dscq..akamai..net", + "netconn_inbound": False, + "netconn_ipv4": 388818410, + "netconn_local_ipv4": 11111, + "netconn_local_port": 11, + "netconn_location": "Santa Clara,CA,United States", + "netconn_port": 80, + "netconn_protocol": "PROTO_TCP", + "observation_description": "The application firefox.exe invoked ", + "observation_id": "8fbccc2da75f11ed937ae3cb089984c6:be6ff259-88e3-6286-789f-74defa192d2e", + "observation_type": "CB_ANALYTICS", + "org_id": "ABCD123456", + "parent_effective_reputation": "ADAPTIVE_WHITE_LIST", + "parent_effective_reputation_source": "CLOUD", + "parent_guid": "TEST-010ac2d3-00001c68-00000000-1d93b6c4d1f20ad", + "parent_hash": [ + "69c8bd1c1dc6103df6bfa9882b5717c0dc4acb8c0c85d8f5c9900db860b6c29b" + ], + "parent_name": "c:\\Windows\\System32\\powershell.exe", + "parent_pid": 7272, + "parent_reputation": "NOT_LISTED", + "process_cmdline": ["powershell.exe -encodedcommand VwByAGkAdABlAC0ATwB1AHQAcAB1AHQAIAAiAE4AbwAgAG0AYQB0AHQAZQByACAAaABvAHcAIAB0AGgAaQBuACAAeQBvAHUAIABzAGwAaQBjAGUAIABpAHQALAAgAGkAdAAnAHMAIABzAHQAaQBsAGwAIABiAGEAbABvAG4AZQB5AC4AIgA="], # noqa: E501 + "process_cmdline_length": [268], + "process_effective_reputation": "NOT_LISTED", + "process_effective_reputation_source": "AV", + "process_guid": "ABCD123456-010ac2d3-00001cf8-00000000-1d93b6c4d2b16a4", + "process_hash": [ + "9df1ec5e25919660a1b0b85d3965d55797b9aac81e028008428106c4dc" + ], + "process_name": "c:\\programdata\\mozilla-1de4eec8-1241-4177-a864-e594e8d1fb38\\updates", + "process_pid": [2000], + "process_reputation": "NOT_LISTED", + "process_sha256": "9df1ec5e25919660a1b0b85d3965d55797b9aac81e028008428106c4dc", + "process_start_time": "2023-02-08T03:20:32.131Z", + "process_username": ["DEV01-39X-1\\bit9qa"], + "ttp": [ + "INTERNATIONAL_SITE", + "ACTIVE_CLIENT", + "NETWORK_ACCESS", + "UNKNOWN_APP", + ], + } + ], +} + + +OBS_DEOBFUSCATE_CMDLINE_REQUEST = { + "input": "powershell.exe -encodedcommand VwByAGkAdABlAC0ATwB1AHQAcAB1AHQAIAAiAE4AbwAgAG0AYQB0AHQAZQByACAAaABvAHcAIAB0AGgAaQBuACAAeQBvAHUAIABzAGwAaQBjAGUAIABpAHQALAAgAGkAdAAnAHMAIABzAHQAaQBsAGwAIABiAGEAbABvAG4AZQB5AC4AIgA=" # noqa: E501 +} + + +OBS_DEOBFUSCATE_CMDLINE_RESPONSE = { + "original_code": "Write-Output \"No matter how thin you slice it, it's still baloney.\"\n", + "deobfuscated_code": "Write-Output \"No matter how thin you slice it, it's still baloney.\"\n", + "identities": [ + "Write-Output" + ], + "strings": [ + "No matter how thin you slice it, it's still baloney." 
+ ], + "obfuscation_level": 0.0 +} + + GET_OBSERVATIONS_SEARCH_JOB_RESULTS_RESP_ALERTS = { "approximate_unaggregated": 2, "completed": 4, diff --git a/src/tests/unit/fixtures/platform/mock_policies.py b/src/tests/unit/fixtures/platform/mock_policies.py index b189706ae..ed6b70290 100644 --- a/src/tests/unit/fixtures/platform/mock_policies.py +++ b/src/tests/unit/fixtures/platform/mock_policies.py @@ -6,7 +6,7 @@ "org_key": "test", "version": 2, "priority_level": "HIGH", - "position": -1, + "position": 3, "is_system": False, "description": "", "auto_deregister_inactive_vdi_interval_ms": 0, @@ -200,6 +200,16 @@ "enable_auth_events": True } }, + { + "id": "cc075469-8d1e-4056-84b6-0e6f437c4010", + "name": "XDR", + "description": "Turns on XDR network data collection at the sensor", + "inherited_from": "", + "category": "data_collection", + "parameters": { + "enable_network_data_collection": False + } + }, { "id": "1f8a5e4b-34f2-4d31-9f8f-87c56facaec8", "name": "Advanced Scripting Prevention", @@ -313,7 +323,7 @@ "id": 65536, "name": "A Dummy Policy", "priority_level": "HIGH", - "position": -1, + "position": 3, "is_system": False, "description": "", "num_devices": 0 @@ -325,7 +335,7 @@ "name": "Forescout Policy", "description": "Initial Forescout policy, no protection turned on", "priority_level": "MEDIUM", - "position": -1, + "position": 4, "num_devices": 0 } @@ -335,7 +345,7 @@ "name": "Remediant AC Policy", "description": "Verifying AC capabilities ", "priority_level": "LOW", - "position": -1, + "position": 5, "num_devices": 0 } @@ -540,6 +550,7 @@ "org_key": "test", "priority_level": "MEDIUM", "description": "Hoopy Frood", + "position": 2, "av_settings": { "avira_protection_cloud": { "enabled": False, @@ -1539,6 +1550,7 @@ "name": "New Policy Name", "org_key": "test", "priority_level": "HIGH", + "position": 6, "version": 2, "is_system": False, "description": "Foobar", @@ -1949,7 +1961,7 @@ "name": "Crapco", "org_key": "test", "priority_level": "MEDIUM", - "position": -1, + "position": 5, "is_system": False, "description": "If you buy this, you'll buy ANYTHING!", "auto_deregister_inactive_vdi_interval_ms": 0, @@ -2465,3 +2477,162 @@ ], "sensor_configs": [] } + +SET_XDR_COLLECTION_REQUEST = { + "id": "cc075469-8d1e-4056-84b6-0e6f437c4010", + "parameters": { + "enable_network_data_collection": True + } +} + +SET_XDR_COLLECTION_RESPONSE = { + "successful": [ + { + "id": "cc075469-8d1e-4056-84b6-0e6f437c4010", + "name": "XDR", + "description": "Turns on XDR network data collection at the sensor", + "inherited_from": "", + "category": "data_collection", + "parameters": { + "enable_network_data_collection": True + } + } + ], + "failed": [] +} + +SET_AUTH_EVENT_COLLECTION_REQUEST = { + "id": "91c919da-fb90-4e63-9eac-506255b0a0d0", + "parameters": { + "enable_auth_events": False + } +} + +SET_AUTH_EVENT_COLLECTION_RESPONSE = { + "successful": [ + { + "id": "91c919da-fb90-4e63-9eac-506255b0a0d0", + "name": "Authentication Events", + "description": "Authentication Events", + "inherited_from": "", + "category": "data_collection", + "parameters": { + "enable_auth_events": False + } + } + ], + "failed": [] +} + +SET_AUTH_EVENT_COLLECTION_RESPONSE_ERROR = { + "successful": [], + "failed": [ + { + "id": "91c919da-fb90-4e63-9eac-506255b0a0d0", + "error_code": "TESTING_ERROR", + "message": "Test error" + } + ] +} + +PREVIEW_POLICY_CHANGES_REQUEST1 = { + "policies": [ + { + "id": 10240, + "position": 1 + } + ] +} + +PREVIEW_POLICY_CHANGES_RESPONSE1 = { + "preview": [ + { + "current_policy": { + "id": 70722, + 
"position": 2 + }, + "new_policy": { + "id": 10240, + "position": 1 + }, + "asset_count": 5, + "asset_query": "(-_exists_:ag_agg_key_dynamic AND ag_agg_key_manual:1790b51e683c8a20c2b2bbe3e41eacdc53e3632087bb5a3f2868588e99157b06 AND policy_override:false) OR (-_exists_:ag_agg_key_dynamic AND ag_agg_key_manual:aa8bd7e69c4ee45918bb126a17d90a1c8368b46f9bb5bf430cb0250c317cd1dc AND policy_override:false)" # noqa: E501 + }, + { + "current_policy": { + "id": 142857, + "position": 1 + }, + "new_policy": { + "id": 10240, + "position": 1 + }, + "asset_count": 2, + "asset_query": "(ag_agg_key_manual:1790b51e683c8a20c2b2bbe3e41eacdc53e3632087bb5a3f2868588e99157b06 AND ag_agg_key_dynamic:51f32868cdd197b491093617b259ea2f4a93550b7c130636df8d48e94d37c4c8 AND policy_override:false)" # noqa: E501 + } + ] +} + +PREVIEW_POLICY_CHANGES_REQUEST2 = { + "policies": [ + { + "id": 65536, + "position": 1 + } + ] +} + +PREVIEW_POLICY_CHANGES_RESPONSE2 = { + "preview": [ + { + "current_policy": { + "id": 1492, + "position": 2 + }, + "new_policy": { + "id": 65536, + "position": 1 + }, + "asset_count": 5, + "asset_query": "(-_exists_:ag_agg_key_dynamic AND ag_agg_key_manual:1790b51e683c8a20c2b2bbe3e41eacdc53e3632087bb5a3f2868588e99157b06 AND policy_override:false) OR (-_exists_:ag_agg_key_dynamic AND ag_agg_key_manual:aa8bd7e69c4ee45918bb126a17d90a1c8368b46f9bb5bf430cb0250c317cd1dc AND policy_override:false)" # noqa: E501 + }, + { + "current_policy": { + "id": 74656, + "position": 1 + }, + "new_policy": { + "id": 65536, + "position": 1 + }, + "asset_count": 2, + "asset_query": "(ag_agg_key_manual:1790b51e683c8a20c2b2bbe3e41eacdc53e3632087bb5a3f2868588e99157b06 AND ag_agg_key_dynamic:51f32868cdd197b491093617b259ea2f4a93550b7c130636df8d48e94d37c4c8 AND policy_override:false)" # noqa: E501 + } + ] +} + +ADD_POLICY_OVERRIDE_REQUEST = { + "action": "ADD_POLICY_OVERRIDE", + "asset_ids": [123, 456, 789], + "policy_id": 65536 +} + +ADD_POLICY_OVERRIDE_RESPONSE = { + "preview": [ + { + "current_policy": { + "id": 11200, + "position": 2 + }, + "new_policy": { + "id": 65536, + "position": 1 + }, + "asset_count": 3, + "asset_query": "(device_id: 123 OR 456 OR 789)", + "assets_search_definition": { + "query": "(device_id: 123 OR 456 OR 789)" + } + } + ] +} diff --git a/src/tests/unit/fixtures/platform/mock_policy_ruleconfigs.py b/src/tests/unit/fixtures/platform/mock_policy_ruleconfigs.py index f55d63eee..2c8066eb7 100644 --- a/src/tests/unit/fixtures/platform/mock_policy_ruleconfigs.py +++ b/src/tests/unit/fixtures/platform/mock_policy_ruleconfigs.py @@ -1185,6 +1185,16 @@ "parameters": { "enable_auth_events": True } + }, + { + "id": "cc075469-8d1e-4056-84b6-0e6f437c4010", + "name": "XDR", + "description": "Turns on XDR network data collection at the sensor", + "inherited_from": "", + "category": "data_collection", + "parameters": { + "enable_network_data_collection": False + } } ] } diff --git a/src/tests/unit/fixtures/platform/mock_process.py b/src/tests/unit/fixtures/platform/mock_process.py index e0b777bdc..7af9f8fd5 100644 --- a/src/tests/unit/fixtures/platform/mock_process.py +++ b/src/tests/unit/fixtures/platform/mock_process.py @@ -2987,3 +2987,17 @@ "contacted": 10, "completed": 2 } + +PROCESS_OBFUSCATED_CMDLINE = "powershell.exe -encodedcommand VwByAGkAdABlAC0ATwB1AHQAcAB1AHQAIAAiAE4AbwAgAG0AYQB0AHQAZQByACAAaABvAHcAIAB0AGgAaQBuACAAeQBvAHUAIABzAGwAaQBjAGUAIABpAHQALAAgAGkAdAAnAHMAIABzAHQAaQBsAGwAIABiAGEAbABvAG4AZQB5AC4AIgA=" # noqa: E501 + +PROCESS_DEOBFUSCATE_CMDLINE_RESPONSE = { + "original_code": "Write-Output \"No matter 
how thin you slice it, it's still baloney.\"\n", + "deobfuscated_code": "Write-Output \"No matter how thin you slice it, it's still baloney.\"\n", + "identities": [ + "Write-Output" + ], + "strings": [ + "No matter how thin you slice it, it's still baloney." + ], + "obfuscation_level": 0.0 +} diff --git a/src/tests/unit/platform/test_alertsv7_api.py b/src/tests/unit/platform/test_alertsv7_api.py index c2aaf1361..9c3083887 100755 --- a/src/tests/unit/platform/test_alertsv7_api.py +++ b/src/tests/unit/platform/test_alertsv7_api.py @@ -25,9 +25,12 @@ HostBasedFirewallAlert, IntrusionDetectionSystemAlert, DeviceControlAlert, + GroupedAlert, Process, - Job + Job, + NetworkThreatMetadata ) +from cbc_sdk.enterprise_edr.threat_intelligence import Watchlist from cbc_sdk.rest_api import CBCloudAPI from tests.unit.fixtures.CBCSDKMock import CBCSDKMock from tests.unit.fixtures.mock_rest_api import ALERT_SEARCH_SUGGESTIONS_RESP @@ -44,7 +47,17 @@ GET_NEW_ALERT_TYPE_RESP, GET_OPEN_WORKFLOW_JOB_RESP, GET_CLOSE_WORKFLOW_JOB_RESP, - GET_ALERT_WORKFLOW_INIT + GET_ALERT_WORKFLOW_INIT, + GET_ALERT_OBFUSCATED_CMDLINE, + ALERT_DEOBFUSCATE_CMDLINE_REQUEST, + ALERT_DEOBFUSCATE_CMDLINE_RESPONSE, + GROUP_SEARCH_ALERT_RESPONSE, + GROUP_SEARCH_ALERT_REQUEST, + GROUP_SEARCH_ALERT_REQUEST_OVERRIDE_GROUPBY, + MOST_RECENT_ALERT, + ALERT_SEARCH_RESPONSE, + GROUPED_ALERT_FACET_REQUEST, + GROUPED_ALERT_FACET_RESPONSE ) from tests.unit.fixtures.platform.mock_process import ( POST_PROCESS_VALIDATION_RESP, @@ -71,6 +84,9 @@ GET_THREAT_HISTORY ) +from tests.unit.fixtures.platform.mock_network_threat_metadata import (GET_NETWORK_THREAT_METADATA_RESP) +from tests.unit.fixtures.enterprise_edr.mock_threatintel import (GET_WATCHLIST_OBJECT_RESP) + @pytest.fixture(scope="function") def cb(): @@ -1217,6 +1233,22 @@ def test_alert_subtype_watchlistalert_string_class(cbcsdk_mock): assert isinstance(alert, WatchlistAlert) +def test_watchlistalert_getwatchlistobjects(cbcsdk_mock): + """Test WatchlistAlert get_watchlist_objects().""" + cbcsdk_mock.mock_request("GET", + "/api/alerts/v7/orgs/test/alerts/f6af290d-6a7f-461c-a8af-cf0d24311105", + GET_ALERT_v7_WATCHLIST_RESPONSE) + cbcsdk_mock.mock_request("GET", + "/threathunter/watchlistmgr/v3/orgs/test/watchlists/mnbvc098766HN60hatQMQ", + GET_WATCHLIST_OBJECT_RESP) + + api = cbcsdk_mock.api + watchlist_alert = api.select("WatchlistAlert", "f6af290d-6a7f-461c-a8af-cf0d24311105") + watchlist_objects = watchlist_alert.get_watchlist_objects() + assert isinstance(watchlist_objects, list) + assert isinstance(watchlist_objects[0], Watchlist) + + def test_alert_subtype_devicecontrolalert_class(cbcsdk_mock): """Test DeviceControlAlert class instantiation.""" cbcsdk_mock.mock_request("GET", @@ -1297,6 +1329,26 @@ def test_alert_subtype_intrusiondetectionsystemalert_string_class(cbcsdk_mock): assert isinstance(alert, IntrusionDetectionSystemAlert) +def test_intrusiondetectionsystemalert_get_network_threat_metadata(cbcsdk_mock): + """Test IntrusionDetectionSystemAlert class as string instantiation.""" + cbcsdk_mock.mock_request("GET", + "/api/alerts/v7/orgs/test/alerts/ca316d99-a808-3779-8aab-62b2b6d9541c", + GET_ALERT_v7_INTRUSION_DETECTION_SYSTEM_RESPONSE) + + cbcsdk_mock.mock_request( + "GET", + "/threatmetadata/v1/orgs/test/detectors/4b98443a-ba0d-4ff5-b99e-e5e70432a214", + GET_NETWORK_THREAT_METADATA_RESP + ) + + api = cbcsdk_mock.api + alert = api.select("IntrusionDetectionSystemAlert", "ca316d99-a808-3779-8aab-62b2b6d9541c") + assert isinstance(alert, IntrusionDetectionSystemAlert) + + 
network_threat_metadata = alert.get_network_threat_metadata() + assert isinstance(network_threat_metadata, NetworkThreatMetadata) + + def test_alert_subtype_invalid_string_class(cbcsdk_mock): """Test invalidAlertType class as string instantiation.""" cbcsdk_mock.mock_request("GET", @@ -1906,3 +1958,283 @@ def test_time_range_formatting(cbcsdk_mock, start, end, time_filter): api = cbcsdk_mock.api alert_query = api.select(Alert).set_time_range(start=start, end=end) assert alert_query._time_range == time_filter + + +def test_alert_deobfuscate_cmdline(cbcsdk_mock): + """Test the deobfuscate_cmdline() method.""" + def on_post_deobfuscate(url, body, **kwargs): + assert body == ALERT_DEOBFUSCATE_CMDLINE_REQUEST + return ALERT_DEOBFUSCATE_CMDLINE_RESPONSE + + cbcsdk_mock.mock_request("GET", + "/api/alerts/v7/orgs/test/alerts/86123310980efd0b38111eba4bfa5e98aa30b19", + GET_ALERT_OBFUSCATED_CMDLINE) + cbcsdk_mock.mock_request("POST", "/tau/v2/orgs/test/reveal", on_post_deobfuscate) + + api = cbcsdk_mock.api + alert = api.select(Alert, "86123310980efd0b38111eba4bfa5e98aa30b19") + deobfuscation = alert.deobfuscate_cmdline() + assert len(deobfuscation['identities']) == 1 + assert len(deobfuscation['strings']) == 1 + assert deobfuscation['deobfuscated_code'] == \ + "Write-Output \"No matter how thin you slice it, it's still baloney.\"\n" + + +def test_alert_all(cbcsdk_mock): + """Test all() method returns list""" + def on_post(url, body, **kwargs): + return {"results": [{"id": "S0L0", "org_key": "test", "threat_id": "B0RG", + "workflow": {"status": "OPEN"}}], "num_found": 1} + + cbcsdk_mock.mock_request("POST", "/api/alerts/v7/orgs/test/alerts/_search", on_post) + api = cbcsdk_mock.api + alert_query = api.select(Alert) + alert_list = alert_query.all() + + assert isinstance(alert_list, list) + + +def test_group_alert_search_request(cbcsdk_mock): + """Test group alert search.""" + def on_post(url, body, **kwargs): + assert body == GROUP_SEARCH_ALERT_REQUEST + return GROUP_SEARCH_ALERT_RESPONSE + + cbcsdk_mock.mock_request("POST", "/api/alerts/v7/orgs/test/grouped_alerts/_search", on_post) + + api = cbcsdk_mock.api + grouped_alerts = api.select(GroupedAlert).set_time_range(range="-10d").add_criteria("type", "WATCHLIST").\ + set_minimum_severity(1).sort_by("count", "DESC") + group_alert = grouped_alerts.first() + + assert isinstance(group_alert, GroupedAlert) + + +def test_group_alert_most_recent_alert(cbcsdk_mock): + """Test group alert search most_recent_alert_() returns the most recent alert.""" + def on_post(url, body, **kwargs): + assert body == GROUP_SEARCH_ALERT_REQUEST + return GROUP_SEARCH_ALERT_RESPONSE + + cbcsdk_mock.mock_request("POST", "/api/alerts/v7/orgs/test/grouped_alerts/_search", on_post) + + api = cbcsdk_mock.api + grouped_alerts = api.select(GroupedAlert).set_time_range(range="-10d").add_criteria("type", "WATCHLIST").\ + set_minimum_severity(1).sort_by("count", "DESC") + first_grouped_alert = grouped_alerts.first() + most_recent_alert = first_grouped_alert.most_recent_alert_ + + assert isinstance(most_recent_alert, WatchlistAlert) + assert most_recent_alert.to_json() == MOST_RECENT_ALERT + + +def test_group_alert_set_group_by(cbcsdk_mock): + """Test set_group_by() overrides the init THREAT_ID in the GroupAlertSearchQuery.""" + def on_post(url, body, **kwargs): + if body["group_by"]["field"] == "THREAT_ID": + # path on first call when set_group_by is defaulted to THREAT_ID + assert body == GROUP_SEARCH_ALERT_REQUEST + assert body["group_by"]["field"] == "THREAT_ID" + else: + # path on 
second call where group_by is overridden + assert body == GROUP_SEARCH_ALERT_REQUEST_OVERRIDE_GROUPBY + assert body["group_by"]["field"] == "NOT_THREAT_ID" + return GROUP_SEARCH_ALERT_RESPONSE + + cbcsdk_mock.mock_request("POST", "/api/alerts/v7/orgs/test/grouped_alerts/_search", on_post) + + api = cbcsdk_mock.api + grouped_alerts = api.select(GroupedAlert).set_time_range(range="-10d").add_criteria("type", "WATCHLIST").\ + set_minimum_severity(1).sort_by("count", "DESC") + grouped_alerts.first() + grouped_alerts = grouped_alerts.set_group_by("NOT_THREAT_ID") + grouped_alerts.first() + + +def test_group_alert_bulk_close_workflow(cbcsdk_mock): + """Test closing a group alert job. Will raise a not implemented exception""" + api = cbcsdk_mock.api + group_alert_query = api.select(GroupedAlert) + with pytest.raises(NotImplementedError): + group_alert_query.close("OTHER", "TRUE_POSITIVE", "Note about the determination") + + +def test_group_alert_to_get_alert_search_query(cbcsdk_mock): + """Test the helper function get_alert_search_query creates the proper request.""" + def on_post(url, body, **kwargs): + assert body == GROUP_SEARCH_ALERT_REQUEST + return GROUP_SEARCH_ALERT_RESPONSE + + cbcsdk_mock.mock_request("POST", "/api/alerts/v7/orgs/test/grouped_alerts/_search", on_post) + + api = cbcsdk_mock.api + grouped_alerts = api.select(GroupedAlert).set_time_range(range="-10d").add_criteria("type", "WATCHLIST").\ + set_minimum_severity(1).sort_by("count", "DESC") + group_alert = grouped_alerts.first() + + alert_search_query = group_alert.get_alert_search_query() + manual_alert_search_query = api.select(Alert).set_time_range(range="-10d").add_criteria("type", "WATCHLIST").\ + set_minimum_severity(1).\ + add_criteria("threat_id", group_alert.most_recent_alert["threat_id"]) + + # deleting instance of querybuilder for assertion check + delattr(alert_search_query, "_query_builder") + delattr(manual_alert_search_query, "_query_builder") + + assert vars(alert_search_query) == vars(manual_alert_search_query) + + +def test_group_alert_to_get_alerts(cbcsdk_mock): + """Test the helper function get_alerts creates the proper request.""" + def on_post(url, body, **kwargs): + assert body == GROUP_SEARCH_ALERT_REQUEST + return GROUP_SEARCH_ALERT_RESPONSE + + cbcsdk_mock.mock_request("POST", "/api/alerts/v7/orgs/test/grouped_alerts/_search", on_post) + cbcsdk_mock.mock_request("POST", "/api/alerts/v7/orgs/test/alerts/_search", ALERT_SEARCH_RESPONSE) + + api = cbcsdk_mock.api + grouped_alerts = api.select(GroupedAlert).set_time_range(range="-10d").add_criteria("type", "WATCHLIST").\ + set_minimum_severity(1).sort_by("count", "DESC") + group_alert = grouped_alerts.first() + alerts = group_alert.get_alerts() + alert = alerts[0] + + assert isinstance(alerts, list) + assert alert.get("type") == "WATCHLIST" + assert alert.get("threat_id") == group_alert.most_recent_alert.get("threat_id") + + +def test_grouped_alert_build_query(cbcsdk_mock): + """Test that grouped alert builds the query correctly when using len() to get the number of results.""" + + def on_post(url, body, **kwargs): + assert body == { + "group_by": { + "field": "THREAT_ID" + }, + "time_range": { + "range": "-10d" + }, + "criteria": { + "type": [ + "WATCHLIST" + ], + "minimum_severity": 1 + }, + "rows": 1, + "sort": [ + { + "field": "count", + "order": "DESC" + } + ] + } + return { + "num_found": 25, + "num_available": 25, + "results": [ + { + "count": 994, + "workflow_states": { + "CLOSED": 1, + "OPEN": 993 + }, + "determination_values": { + "NONE": 994 + 
}, + "ml_classification_final_verdicts": { + "NOT_CLASSIFIED": 4, + "NOT_ANOMALOUS": 982, + "ANOMALOUS": 8 + }, + "first_alert_timestamp": "2023-11-21T21:24:37.756Z", + "last_alert_timestamp": "2023-12-01T21:00:42.937Z", + "highest_severity": 7, + "policy_applied": "NOT_APPLIED", + "threat_notes_present": False, + "tags": [], + "device_count": 10, + "workload_count": 0, + "most_recent_alert": { + "org_key": "ABCD1234", + "alert_url": "defense.conferdeploy.net/alerts?s[c]" + "[query_string]=id:9d7f0692-e9cc-4ecc-9983-b063f1455cab&orgKey=ABCD1234", + "id": "9d7f0692-e9cc-4ecc-9983-b063f1455cab", + "type": "WATCHLIST", + "severity": 7, + } + } + ], + "group_by_total_count": 6421 + } + + cbcsdk_mock.mock_request("POST", "/api/alerts/v7/orgs/test/grouped_alerts/_search", on_post) + api = cbcsdk_mock.api + + grouped_alert_query = api.select(GroupedAlert).set_minimum_severity(1).set_time_range(range="-10d")\ + .add_criteria("type", "WATCHLIST").set_rows(1).sort_by("count", "DESC") + assert len(grouped_alert_query) == 25 + + +def test_group_alert_bulk_update_workflow(cbcsdk_mock): + """Test updating a group alert job. Will raise a not implemented exception""" + api = cbcsdk_mock.api + group_alert_query = api.select(GroupedAlert) + with pytest.raises(NotImplementedError): + group_alert_query.update("OPEN", "OTHER", "TRUE_POSITIVE", "Note about the determination") + + +def test_grouped_alert_search_query_to_alert_search_query(cbcsdk_mock): + """Test the helper function converts a grouped alert search query to an ungrouped query""" + api = cbcsdk_mock.api + expected_alert_search_query = api.select(Alert).set_time_range(range="-10d").add_criteria("type", "WATCHLIST").\ + set_minimum_severity(1) + + grouped_alerts_search_query = api.select(GroupedAlert).set_time_range(range="-10d").\ + add_criteria("type", "WATCHLIST").set_minimum_severity(1).sort_by("count", "DESC") + alert_search_query = grouped_alerts_search_query.get_alert_search_query() + + # deleting instance of querybuilder for assertion check + delattr(alert_search_query, "_query_builder") + delattr(expected_alert_search_query, "_query_builder") + + assert alert_search_query.__module__ == "cbc_sdk.platform.alerts" and type(alert_search_query).__name__ == \ + "AlertSearchQuery" + assert vars(alert_search_query) == vars(expected_alert_search_query) + + +def test_alert_search_query_to_grouped_alert_search_query(cbcsdk_mock): + """Test the helper function converts an alert search query to a grouped query""" + api = cbcsdk_mock.api + expected_grouped_alert_search_query = api.select(GroupedAlert).set_time_range(range="-10d").\ + add_criteria("type", "WATCHLIST").\ + set_minimum_severity(1).set_group_by("threat_id") + + alerts_search_query = api.select(Alert).set_time_range(range="-10d").\ + add_criteria("type", "WATCHLIST").set_minimum_severity(1).sort_by("first_event_timestamp", "DESC") + grouped_alert_search_query = alerts_search_query.set_group_by("threat_id") + + # deleting instance of querybuilder for assertion check + delattr(grouped_alert_search_query, "_query_builder") + delattr(expected_grouped_alert_search_query, "_query_builder") + + assert grouped_alert_search_query.__module__ == "cbc_sdk.platform.alerts" and type(grouped_alert_search_query).\ + __name__ == "GroupedAlertSearchQuery" + assert vars(grouped_alert_search_query) == vars(expected_grouped_alert_search_query) + + +def test_query_grouped_alert_facets(cbcsdk_mock): + """Test a grouped alert facet query.""" + + def on_post(url, body, **kwargs): + assert body == 
GROUPED_ALERT_FACET_REQUEST + return GROUPED_ALERT_FACET_RESPONSE + cbcsdk_mock.mock_request("POST", "/api/alerts/v7/orgs/test/grouped_alerts/_facet", on_post) + api = cbcsdk_mock.api + + query = api.select(GroupedAlert).set_group_by("THREAT_ID").set_minimum_severity(3).\ + add_exclusions("type", ["HOST_BASED_FIREWALL", "CONTAINER_RUNTIME"]) + facets = query.facets(["type", "THREAT_ID"], 0, True) + assert facets == GROUPED_ALERT_FACET_RESPONSE["results"] + assert len(facets) == 2 diff --git a/src/tests/unit/platform/test_asset_groups.py b/src/tests/unit/platform/test_asset_groups.py new file mode 100644 index 000000000..20dce515e --- /dev/null +++ b/src/tests/unit/platform/test_asset_groups.py @@ -0,0 +1,461 @@ +# ******************************************************* +# Copyright (c) VMware, Inc. 2020-2022. All Rights Reserved. +# SPDX-License-Identifier: MIT +# ******************************************************* +# * +# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT +# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN, +# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED +# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY, +# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE. + +"""Tests of the asset groups support in the Platform API.""" + +import pytest +import logging +import copy +from cbc_sdk.rest_api import CBCloudAPI +from cbc_sdk.errors import ApiError +from cbc_sdk.platform import AssetGroup, Device +from tests.unit.fixtures.CBCSDKMock import CBCSDKMock +from tests.unit.fixtures.platform.mock_asset_groups import (CREATE_AG_REQUEST, CREATE_AG_RESPONSE, EXISTING_AG_DATA, + UPDATE_AG_REQUEST, QUERY_REQUEST, QUERY_REQUEST_DEFAULT, + QUERY_RESPONSE, LIST_MEMBERS_RESPONSE1, + LIST_MEMBERS_OUTPUT1, LIST_MEMBERS_RESPONSE2, + GET_ALL_RESPONSE, GET_STATS_RESPONSE, + PREVIEW_DELETE_REQUEST, PREVIEW_NULL_RESPONSE, + PREVIEW_ADD_MEMBERS_REQUEST, PREVIEW_REMOVE_MEMBERS_REQUEST, + EXISTING_AG_DATA_3, PREVIEW_UPDATE_REQUEST_1, + PREVIEW_UPDATE_REQUEST_2, PREVIEW_UPDATE_REQUEST_3, + PREVIEW_UPDATE_REQUEST_4, PREVIEW_DELETE_REQUEST_2, + PREVIEW_CREATE_REQUEST) +from tests.unit.fixtures.platform.mock_devices import GET_DEVICE_RESP + + +logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, filename='log.txt') + + +@pytest.fixture(scope="function") +def cb(): + """Create CBCloudAPI singleton""" + return CBCloudAPI(url="https://example.com", + org_key="test", + token="abcd/1234", + ssl_verify=False) + + +@pytest.fixture(scope="function") +def cbcsdk_mock(monkeypatch, cb): + """Mocks CBC SDK for unit tests""" + return CBCSDKMock(monkeypatch, cb) + + +# ==================================== UNIT TESTS BELOW ==================================== + +def test_create_asset_group(cbcsdk_mock): + """Tests the Create Asset Group call.""" + posted = False + + def on_post(uri, body, **kwargs): + nonlocal posted + assert body == CREATE_AG_REQUEST + posted = True + return CREATE_AG_RESPONSE + + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups', on_post) + api = cbcsdk_mock.api + group = AssetGroup.create_group(api, "Group Test", "Group Test Description", policy_id=7113785, + query="os_version:Windows") + assert posted + assert group is not None + assert group.id == '4b48a403-e371-4e3d-ae6c-8eb9080fe7ad' + assert group.name == 'Group Test' + assert group.description == 'Group Test Description' + assert group.policy_id == 7113785 + assert group.query == "os_version:Windows" + + +def 
test_find_and_update_asset_group(cbcsdk_mock): + """Tests finding and updating the asset group.""" + did_put = False + + def on_put(url, body, **kwargs): + nonlocal did_put + assert body == UPDATE_AG_REQUEST + did_put = True + return copy.deepcopy(body) + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request('PUT', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + on_put) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + assert not did_put + assert group is not None + assert group.id == 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16' + assert group.name == 'Existing Group' + assert group.description == 'Some Description' + assert group.policy_id == 8675309 + + group.name = "Renamed Group" + group.description = 'Change This Too' + group.policy_id = 9001 + group.save() + assert did_put + + +def test_find_and_delete_asset_group(cbcsdk_mock): + """Tests finding and deleting the asset group.""" + did_delete = False + + def on_delete(url, body): + nonlocal did_delete + did_delete = True + return CBCSDKMock.StubResponse(None, scode=200) + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request('DELETE', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + on_delete) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + assert group is not None + assert group.id == 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16' + assert group.name == 'Existing Group' + assert group.description == 'Some Description' + assert group.policy_id == 8675309 + + group.delete() + assert did_delete + + +@pytest.mark.parametrize("name, polid, groupid", [ + ("Group Test", 7113785, "9b8b8d84-4a44-4a94-81ec-1f8ef52d4430"), + (["Group Test"], [7113785], ["9b8b8d84-4a44-4a94-81ec-1f8ef52d4430"]), +]) +def test_query_with_all_options(cbcsdk_mock, name, polid, groupid): + """Tests querying for asset groups with all options set.""" + + def on_post(uri, body, **kwargs): + tbody = copy.deepcopy(body) + if 'start' not in tbody: + tbody['start'] = 0 + assert tbody == QUERY_REQUEST + return QUERY_RESPONSE + + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/_search', on_post) + api = cbcsdk_mock.api + query = api.select(AssetGroup).where("test").add_criteria("discovered", False).add_criteria("name", name) + query.add_criteria("policy_id", polid).add_criteria("group_id", groupid).sort_by("name", "ASC").set_rows(42) + assert query._count() == 1 + output = list(query) + assert len(output) == 1 + assert output[0].id == "9b8b8d84-4a44-4a94-81ec-1f8ef52d4430" + assert output[0].name == "Group Test" + assert output[0].description == "Group Test" + assert output[0].policy_id == 7113785 + + +def test_query_with_everything_default(cbcsdk_mock): + """Tests querying for asset groups with all default options.""" + + def on_post(uri, body, **kwargs): + tbody = copy.deepcopy(body) + if 'start' not in tbody: + tbody['start'] = 0 + assert tbody == QUERY_REQUEST_DEFAULT + return QUERY_RESPONSE + + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/_search', on_post) + api = cbcsdk_mock.api + query = api.select(AssetGroup) + output = list(query) + assert len(output) == 1 + assert output[0].id == "9b8b8d84-4a44-4a94-81ec-1f8ef52d4430" + assert output[0].name == "Group 
Test" + assert output[0].description == "Group Test" + assert output[0].policy_id == 7113785 + + +def test_query_async(cbcsdk_mock): + """Tests async querying for asset groups.""" + + def on_post(uri, body, **kwargs): + tbody = copy.deepcopy(body) + if 'start' not in tbody: + tbody['start'] = 0 + assert tbody == QUERY_REQUEST + return QUERY_RESPONSE + + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/_search', on_post) + api = cbcsdk_mock.api + query = api.select(AssetGroup).where("test").add_criteria("discovered", False).add_criteria("name", "Group Test") + query.add_criteria("policy_id", 7113785).add_criteria("group_id", "9b8b8d84-4a44-4a94-81ec-1f8ef52d4430") + query.sort_by("name", "ASC").set_rows(42) + future = query.execute_async() + output = future.result() + assert len(output) == 1 + assert output[0].id == "9b8b8d84-4a44-4a94-81ec-1f8ef52d4430" + assert output[0].name == "Group Test" + assert output[0].description == "Group Test" + assert output[0].policy_id == 7113785 + + +def test_query_fail_criteria_set(cb): + """Tests the failure of validation when setting criteria on a query.""" + query = cb.select(AssetGroup) + with pytest.raises(ApiError): + query.sort_by("name", "NOTADIRECTION") + + +def test_list_member_ids_basic(cbcsdk_mock): + """Tests the formatting of the 'list members' call with rows and start parameters, and the basic response.""" + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16/members?rows=20&start=0', # noqa: E501 + LIST_MEMBERS_RESPONSE1) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + rc = group.list_member_ids(rows=20, start=0) + assert rc == LIST_MEMBERS_OUTPUT1 + + +def test_list_members(cbcsdk_mock): + """Tests the device return mechanism of list_members.""" + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16/members?rows=20&start=0', # noqa: E501 + LIST_MEMBERS_RESPONSE2) + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + rc = group.list_members() + assert len(rc) == 1 + assert isinstance(rc[0], Device) + assert rc[0].id == 98765 + rc = group.list_members(membership="MANUAL") + assert len(rc) == 1 + assert isinstance(rc[0], Device) + assert rc[0].id == 98765 + rc = group.list_members(membership="DYNAMIC") + assert len(rc) == 0 + + +def test_list_members_bogus_membership(cbcsdk_mock): + """Tests the error return from list_members.""" + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + with pytest.raises(ApiError): + group.list_members(membership="BOGUS") + + +@pytest.mark.parametrize("param, expected", [ + (14760, ["14760"]), + ([16, 99], ["16", "99"]), +]) +def test_add_members(cbcsdk_mock, param, expected): + """Tests the add_members API with various combinations of parameters.""" + def on_post(url, body, **kwargs): + assert body['action'] == 'CREATE' + assert 
body['external_member_ids'] == expected + return CBCSDKMock.StubResponse("", scode=204, json_parsable=False) + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16/members', + on_post) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + group.add_members(param) + + +def test_add_members_with_device(cbcsdk_mock): + """Tests the add_members API with a Device object.""" + def on_post(url, body, **kwargs): + assert body['action'] == 'CREATE' + assert body['external_member_ids'] == ["98765"] + return CBCSDKMock.StubResponse("", scode=204, json_parsable=False) + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16/members', + on_post) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + device = api.select(Device, 98765) + group.add_members(device) + group.add_members([device]) + + +@pytest.mark.parametrize("param, expected", [ + (14760, ["14760"]), + ([70717, 14920], ["70717", "14920"]), +]) +def test_remove_members(cbcsdk_mock, param, expected): + """Tests the remove_members API.""" + def on_post(url, body, **kwargs): + assert body['action'] == 'REMOVE' + assert body['external_member_ids'] == expected + return CBCSDKMock.StubResponse("", scode=204, json_parsable=False) + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16/members', + on_post) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + group.remove_members(param) + + +def test_remove_members_with_device(cbcsdk_mock): + """Tests the remove_members API with a device parameter.""" + def on_post(url, body, **kwargs): + assert body['action'] == 'REMOVE' + assert body['external_member_ids'] == ["98765"] + return CBCSDKMock.StubResponse("", scode=204, json_parsable=False) + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16/members', + on_post) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + device = api.select(Device, 98765) + group.remove_members(device) + group.remove_members([device]) + + +def test_get_all_groups(cbcsdk_mock): + """Tests the get_all_groups class method.""" + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups', copy.deepcopy(GET_ALL_RESPONSE)) + api = cbcsdk_mock.api + rc = AssetGroup.get_all_groups(api) + assert len(rc) == 2 + assert isinstance(rc[0], AssetGroup) + assert isinstance(rc[1], AssetGroup) + assert rc[0].id == "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16" + assert rc[1].id == "509f437f-6b9a-4b8e-996e-9183b35f9069" + + +def 
test_get_statistics(cbcsdk_mock): + """Tests the get_statistics method.""" + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request('GET', + '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16/membership_summary', # noqa: E501 + GET_STATS_RESPONSE) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + rc = group.get_statistics() + assert rc == GET_STATS_RESPONSE + + +def test_preview_delete_asset_groups(cbcsdk_mock): + """Tests the preview_delete_asset_groups function.""" + def on_post(url, body, **kwargs): + assert body == PREVIEW_DELETE_REQUEST + return PREVIEW_NULL_RESPONSE + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request("POST", "/policy-assignment/v1/orgs/test/asset-groups/preview", on_post) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + rc = AssetGroup.preview_delete_asset_groups(api, [group, "149cea01-2a13-4a0a-8ca9-cdf359a6378e"]) + assert len(rc) == 0 + + +def test_preview_add_members(cbcsdk_mock): + """Tests the preview_add_members and preview_add_members_to_groups functions.""" + def on_post(url, body, **kwargs): + assert body == PREVIEW_ADD_MEMBERS_REQUEST + return PREVIEW_NULL_RESPONSE + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request("POST", "/policy-assignment/v1/orgs/test/asset-groups/preview", on_post) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + rc = group.preview_add_members([123, 456]) + assert len(rc) == 0 + + +def test_preview_remove_members(cbcsdk_mock): + """Tests the preview_remove_members and preview_remove_members_from_groups functions.""" + def on_post(url, body, **kwargs): + assert body == PREVIEW_REMOVE_MEMBERS_REQUEST + return PREVIEW_NULL_RESPONSE + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request("POST", "/policy-assignment/v1/orgs/test/asset-groups/preview", on_post) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + rc = group.preview_remove_members([123, 456]) + assert len(rc) == 0 + + +@pytest.mark.parametrize("field, newvalue, expected_body", [ + ("policy_id", 32768, PREVIEW_UPDATE_REQUEST_1), + ("policy_id", None, PREVIEW_UPDATE_REQUEST_2), + ("query", "os: WINDOWS OR MACOS", PREVIEW_UPDATE_REQUEST_3), + ("query", None, PREVIEW_UPDATE_REQUEST_4) +]) +def test_preview_save(cbcsdk_mock, field, newvalue, expected_body): + """Tests the preview_save and preview_update_asset_groups functions.""" + posted = False + + def on_post(url, body, **kwargs): + assert body == expected_body + nonlocal posted + posted = True + return PREVIEW_NULL_RESPONSE + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/16b0dd95-85a3-4f73-bcf4-9b666436c534', + copy.deepcopy(EXISTING_AG_DATA_3)) + cbcsdk_mock.mock_request("POST", "/policy-assignment/v1/orgs/test/asset-groups/preview", on_post) + api = cbcsdk_mock.api + group = api.select(AssetGroup, '16b0dd95-85a3-4f73-bcf4-9b666436c534') + group._set(field, newvalue) + rc = group.preview_save() + assert len(rc) == 0 + assert 
posted + + +def test_preview_update_asset_groups_null_response(cb): + """Tests the null response from preview_update_asset_groups.""" + rc = AssetGroup.preview_update_asset_groups(cb, ['db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + '16b0dd95-85a3-4f73-bcf4-9b666436c534']) + assert len(rc) == 0 + + +def test_preview_delete(cbcsdk_mock): + """Tests the preview_delete function.""" + def on_post(url, body, **kwargs): + assert body == PREVIEW_DELETE_REQUEST_2 + return PREVIEW_NULL_RESPONSE + + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request("POST", "/policy-assignment/v1/orgs/test/asset-groups/preview", on_post) + api = cbcsdk_mock.api + group = api.select(AssetGroup, 'db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16') + rc = group.preview_delete() + assert len(rc) == 0 + + +def test_preview_create_asset_group(cbcsdk_mock): + """Tests the preview_create_asset_group function.""" + def on_post(url, body, **kwargs): + assert body == PREVIEW_CREATE_REQUEST + return PREVIEW_NULL_RESPONSE + + cbcsdk_mock.mock_request("POST", "/policy-assignment/v1/orgs/test/asset-groups/preview", on_post) + api = cbcsdk_mock.api + rc = AssetGroup.preview_create_asset_group(api, 123456, "os.equals:WINDOWS") + assert len(rc) == 0 diff --git a/src/tests/unit/platform/test_devicev6_api.py b/src/tests/unit/platform/test_devicev6_api.py index 7785dda4e..eff6bf2ff 100755 --- a/src/tests/unit/platform/test_devicev6_api.py +++ b/src/tests/unit/platform/test_devicev6_api.py @@ -13,11 +13,12 @@ import pytest from cbc_sdk.errors import ApiError, ServerError -from cbc_sdk.platform import Device, DeviceFacet +from cbc_sdk.platform import Device, DeviceFacet, Job from cbc_sdk.rest_api import CBCloudAPI from tests.unit.fixtures.CBCSDKMock import CBCSDKMock from tests.unit.fixtures.platform.mock_devices import (FACET_RESPONSE, FACET_INIT_1, FACET_INIT_2, FACET_INIT_3, - FACET_INIT_4, FACET_INIT_5, FACET_INIT_6, FACET_INIT_7) + FACET_INIT_4, FACET_INIT_5, FACET_INIT_6, FACET_INIT_7, + GET_SCROLL_DEVICES, EXPORT_JOB_REDIRECT) @pytest.fixture(scope="function") @@ -377,3 +378,58 @@ def on_query(url, body, **kwargs): query = api.select(Device).set_deployment_type(["ENDPOINT"]) d = query.one() assert d.deployment_type[0] in ["ENDPOINT", "WORKLOAD"] + + +def test_device_scroll(cbcsdk_mock): + """Testing DeviceSearchQuery scroll""" + cbcsdk_mock.mock_request("POST", "/appservices/v6/orgs/test/devices/_scroll", + GET_SCROLL_DEVICES(100, 200, 100)) + + api = cbcsdk_mock.api + query = api.select(Device).set_deployment_type(["ENDPOINT"]) + + results = query.scroll(100) + + assert query.num_remaining == 100 + assert query._search_after == "MTcwMjMyMTM2MDU3OSwyMT" + + def on_post(url, body, **kwargs): + """Test 2nd scroll request""" + assert body == { + "criteria": { + "deployment_type": ["ENDPOINT"] + }, + "rows": 10000, + "search_after": "MTcwMjMyMTM2MDU3OSwyMT" + } + return GET_SCROLL_DEVICES(100, 200, 0) + + cbcsdk_mock.mock_request("POST", "/appservices/v6/orgs/test/devices/_scroll", + on_post) + + results.extend(query.scroll(20000)) + + assert len(results) == 200 + + assert query.scroll(100) == [] + + +def test_device_export(cbcsdk_mock): + """Test the export functionality of the DeviceSearchQuery.""" + api = cbcsdk_mock.api + cbcsdk_mock.mock_request("GET", "/jobs/v1/orgs/test/jobs/11608915", EXPORT_JOB_REDIRECT) + + def post_validate(url, body, **kwargs): + nonlocal api + assert body['format'] == "CSV" + + # CBC Backend uses 303 Redirect 
which has been mocked out with follow up API call + return api.get_object("/jobs/v1/orgs/test/jobs/11608915") + + cbcsdk_mock.mock_request("POST", "/appservices/v6/orgs/test/devices/_export", post_validate) + + query = api.select(Device).set_status(["ACTIVE"]) + job = query.export() + assert job + assert isinstance(job, Job) + assert job.id == 11608915 diff --git a/src/tests/unit/platform/test_observations.py b/src/tests/unit/platform/test_observations.py index 1a05f678c..b8e2716f5 100644 --- a/src/tests/unit/platform/test_observations.py +++ b/src/tests/unit/platform/test_observations.py @@ -18,6 +18,9 @@ GET_OBSERVATIONS_SEARCH_JOB_RESULTS_RESP_0, GET_OBSERVATIONS_SEARCH_JOB_RESULTS_RESP_ZERO_COMP, GET_OBSERVATIONS_DETAIL_JOB_RESULTS_RESP, + GET_OBSERVATIONS_DETAIL_JOB_RESULTS_FOR_DEOBFUSCATE, + OBS_DEOBFUSCATE_CMDLINE_REQUEST, + OBS_DEOBFUSCATE_CMDLINE_RESPONSE, GET_OBSERVATIONS_SEARCH_JOB_RESULTS_RESP, GET_OBSERVATIONS_SEARCH_JOB_RESULTS_NO_RULE_ID_RESP, POST_OBSERVATIONS_FACET_SEARCH_JOB_RESP, @@ -441,9 +444,15 @@ def test_observations_timeout(cbcsdk_mock): ) api = cbcsdk_mock.api query = api.select(Observation).where("observation_id:some_id") - assert query._timeout == 0 + assert query._timeout == 300000 query.timeout(msecs=500) assert query._timeout == 500 + query.timeout(msecs=999999) + assert query._timeout == 300000 + query.timeout(msecs=700) + assert query._timeout == 700 + query.timeout(msecs=0) + assert query._timeout == 300000 def test_observations_timeout_error(cbcsdk_mock): @@ -662,6 +671,30 @@ def test_observations_still_querying2(cbcsdk_mock): assert obs_list._still_querying() is True +def test_observation_deobfuscate_cmdline(cbcsdk_mock): + """Test the deobfuscate_cmdline() function.""" + def on_post_deobfuscate(url, body, **kwargs): + assert body == OBS_DEOBFUSCATE_CMDLINE_REQUEST + return OBS_DEOBFUSCATE_CMDLINE_RESPONSE + + cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/observations/detail_jobs", + POST_OBSERVATIONS_SEARCH_JOB_RESP) + cbcsdk_mock.mock_request( + "GET", + "/api/investigate/v2/orgs/test/observations/detail_jobs/08ffa932-b633-4107-ba56-8741e929e48b/results", + GET_OBSERVATIONS_DETAIL_JOB_RESULTS_FOR_DEOBFUSCATE) + cbcsdk_mock.mock_request("POST", "/tau/v2/orgs/test/reveal", on_post_deobfuscate) + + api = cbcsdk_mock.api + obs = Observation(api, initial_data={"observation_id": "test"}) + observation = obs._get_detailed_results() + deobfuscation = observation.deobfuscate_cmdline() + assert len(deobfuscation['identities']) == 1 + assert len(deobfuscation['strings']) == 1 + assert deobfuscation['deobfuscated_code'] == \ + "Write-Output \"No matter how thin you slice it, it's still baloney.\"\n" + + # --------------------- ObservationFacet -------------------------------------- @@ -786,9 +819,15 @@ def test_observation_facet_timeout(cbcsdk_mock): .where("process_name:some_name") .add_facet_field("process_name") ) - assert query._timeout == 0 + assert query._timeout == 300000 query.timeout(msecs=500) assert query._timeout == 500 + query.timeout(msecs=999999) + assert query._timeout == 300000 + query.timeout(msecs=700) + assert query._timeout == 700 + query.timeout(msecs=0) + assert query._timeout == 300000 def test_observation_facet_timeout_error(cbcsdk_mock): @@ -1189,16 +1228,16 @@ def test_observations_search_suggestions_api_error(): Observation.search_suggestions("", "device_id", 10) -def test_bulk_get_details_api_error(): - """Tests bulk_get_details - no CBCloudAPI arg""" +def test_bulk_get_details_api_error(cb): + """Tests 
bulk_get_details""" with pytest.raises(ApiError): - Observation.bulk_get_details("", alert_id="xx") + Observation.bulk_get_details(cb, alert_id="xx") -def test_helper_get_details_api_error(): - """Tests _helper_get_details - no CBCloudAPI arg""" +def test_helper_get_details_api_error(cb): + """Tests _helper_get_details""" with pytest.raises(ApiError): - Observation._helper_get_details("", alert_id="xx") + Observation._helper_get_details(cb, alert_id="xx") def test_bulk_get_details_neither(cbcsdk_mock): diff --git a/src/tests/unit/platform/test_platform_devices.py b/src/tests/unit/platform/test_platform_devices.py index 0daa00a22..5d6827ad5 100644 --- a/src/tests/unit/platform/test_platform_devices.py +++ b/src/tests/unit/platform/test_platform_devices.py @@ -2,14 +2,22 @@ import pytest import logging -from cbc_sdk.platform import Device, DeviceSearchQuery +import copy +from cbc_sdk.platform import AssetGroup, Device, DeviceSearchQuery from cbc_sdk.rest_api import CBCloudAPI from cbc_sdk.errors import ApiError from tests.unit.fixtures.CBCSDKMock import CBCSDKMock -from tests.unit.fixtures.platform.mock_devices import (GET_DEVICE_RESP, - POST_DEVICE_SEARCH_RESP) +from tests.unit.fixtures.platform.mock_asset_groups import EXISTING_AG_DATA, EXISTING_AG_DATA_2 +from tests.unit.fixtures.platform.mock_devices import (GET_DEVICE_RESP, POST_DEVICE_SEARCH_RESP, + ASSET_GROUPS_RESPONSE_1, ASSET_GROUPS_OUTPUT_1, + ASSET_GROUPS_RESPONSE_2, ASSET_GROUPS_OUTPUT_2, + ASSET_GROUPS_RESPONSE_3, ASSET_GROUPS_OUTPUT_3, + ASSET_GROUPS_RESPONSE_SINGLE, ASSET_GROUPS_OUTPUT_SINGLE, + ADD_POLICY_OVERRIDE_REQUEST, ADD_POLICY_OVERRIDE_RESPONSE, + REMOVE_POLICY_OVERRIDE_REQUEST, REMOVE_POLICY_OVERRIDE_RESPONSE) -log = logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, filename='log.txt') + +logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, filename='log.txt') @pytest.fixture(scope="function") @@ -91,3 +99,203 @@ def test_device_max_rows(cbcsdk_mock): with pytest.raises(ApiError): query.set_max_rows(10001) + + +@pytest.mark.parametrize("param, filt, memberids, response, output", [ + ([98765, 3031, 1777], "ALL", ["98765", "3031", "1777"], ASSET_GROUPS_RESPONSE_1, ASSET_GROUPS_OUTPUT_1), + ([98765, 3031, 1777], "DYNAMIC", ["98765", "3031", "1777"], ASSET_GROUPS_RESPONSE_2, ASSET_GROUPS_OUTPUT_2), + ([98765, 3031, 1777], "MANUAL", ["98765", "3031", "1777"], ASSET_GROUPS_RESPONSE_3, ASSET_GROUPS_OUTPUT_3), + (98765, "ALL", ["98765"], ASSET_GROUPS_RESPONSE_SINGLE, ASSET_GROUPS_OUTPUT_SINGLE) +]) +def test_get_asset_groups_for_devices(cbcsdk_mock, param, filt, memberids, response, output): + """Tests the get_asset_groups_for_devices function.""" + def on_post(url, body, **kwargs): + assert body['external_member_ids'] == memberids + if filt == "ALL": + assert 'membership_type' not in body + else: + assert body['membership_type'] == [filt] + return response + + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/members', on_post) + api = cbcsdk_mock.api + rc = Device.get_asset_groups_for_devices(api, param, membership=filt) + assert rc == output + + +def test_get_asset_groups_for_devices_with_device(cbcsdk_mock): + """Tests get_asset_groups_for_devices with a Device parameter.""" + def on_post(url, body, **kwargs): + assert body['external_member_ids'] == ["98765"] + assert 'membership_type' not in body + return ASSET_GROUPS_RESPONSE_SINGLE + + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + 
cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/members', on_post) + api = cbcsdk_mock.api + device = api.select(Device, 98765) + rc = Device.get_asset_groups_for_devices(api, device) + assert rc == ASSET_GROUPS_OUTPUT_SINGLE + rc = Device.get_asset_groups_for_devices(api, [device]) + assert rc == ASSET_GROUPS_OUTPUT_SINGLE + + +def test_get_asset_groups_for_devices_null_and_error_responses(cb): + """Tests the error responses from test_get_asset_groups_for_devices.""" + assert Device.get_asset_groups_for_devices(cb, "bogus_value") == {} + with pytest.raises(ApiError): + Device.get_asset_groups_for_devices(cb, 98765, membership="BOGUS") + + +@pytest.mark.parametrize("membership, result", [ + ("ALL", ["db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", "509f437f-6b9a-4b8e-996e-9183b35f9069"]), + ("MANUAL", ["db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16"]), + ("DYNAMIC", ["509f437f-6b9a-4b8e-996e-9183b35f9069"]) +]) +def test_device_get_asset_group_ids(cbcsdk_mock, membership, result): + """Tests the get_asset_group_ids Device function.""" + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + api = cbcsdk_mock.api + device = api.select(Device, 98765) + assert device.get_asset_group_ids(membership=membership) == result + + +def test_device_get_asset_group_ids_bogus_value(cbcsdk_mock): + """Tests a bogus value passed to the membership parameter of the get_asset_group_ids Device function.""" + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + api = cbcsdk_mock.api + device = api.select(Device, 98765) + with pytest.raises(ApiError): + device.get_asset_group_ids("BOGUS") + + +def test_device_get_asset_groups(cbcsdk_mock): + """Tests the get_asset_groups Device function.""" + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/509f437f-6b9a-4b8e-996e-9183b35f9069', + copy.deepcopy(EXISTING_AG_DATA_2)) + api = cbcsdk_mock.api + device = api.select(Device, 98765) + result = device.get_asset_groups() + assert len(result) == 2 + assert isinstance(result[0], AssetGroup) + assert isinstance(result[1], AssetGroup) + assert result[0].id == "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16" + assert result[1].id == "509f437f-6b9a-4b8e-996e-9183b35f9069" + + +def test_device_add_to_groups_by_id(cbcsdk_mock): + """Tests the add_to_groups_by_id Device function.""" + def on_post(url, body, **kwargs): + assert body['action'] == 'CREATE' + assert body['external_member_ids'] == ["98765"] + return CBCSDKMock.StubResponse("", scode=204, json_parsable=False) + + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/149cea01-2a13-4a0a-8ca9-cdf359a6378e/members', + on_post) + api = cbcsdk_mock.api + device = api.select(Device, 98765) + device.add_to_groups_by_id(["db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", "149cea01-2a13-4a0a-8ca9-cdf359a6378e"]) + + +def test_device_add_to_groups(cbcsdk_mock): + """Tests the add_to_groups Device function.""" + def on_post(url, body, **kwargs): + assert body['action'] == 'CREATE' + assert body['external_member_ids'] == ["98765"] + return CBCSDKMock.StubResponse("", scode=204, json_parsable=False) + + cbcsdk_mock.mock_request("GET", 
"/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/509f437f-6b9a-4b8e-996e-9183b35f9069', + copy.deepcopy(EXISTING_AG_DATA_2)) + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/509f437f-6b9a-4b8e-996e-9183b35f9069/members', + on_post) + api = cbcsdk_mock.api + device = api.select(Device, 98765) + asset_group_1 = api.select(AssetGroup, "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16") + asset_group_2 = api.select(AssetGroup, "509f437f-6b9a-4b8e-996e-9183b35f9069") + device.add_to_groups([asset_group_1, asset_group_2]) + + +def test_device_remove_from_groups_by_id(cbcsdk_mock): + """Tests the remove_from_groups_by_id Device function.""" + def on_post(url, body, **kwargs): + assert body['action'] == 'REMOVE' + assert body['external_member_ids'] == ["98765"] + return CBCSDKMock.StubResponse("", scode=204, json_parsable=False) + + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16/members', + on_post) + api = cbcsdk_mock.api + device = api.select(Device, 98765) + device.remove_from_groups_by_id(["db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16", "149cea01-2a13-4a0a-8ca9-cdf359a6378e"]) + + +def test_device_remove_from_groups(cbcsdk_mock): + """Tests the remove_from_groups Device function.""" + def on_post(url, body, **kwargs): + assert body['action'] == 'REMOVE' + assert body['external_member_ids'] == ["98765"] + return CBCSDKMock.StubResponse("", scode=204, json_parsable=False) + + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16', + copy.deepcopy(EXISTING_AG_DATA)) + cbcsdk_mock.mock_request('GET', '/asset_groups/v1/orgs/test/groups/509f437f-6b9a-4b8e-996e-9183b35f9069', + copy.deepcopy(EXISTING_AG_DATA_2)) + cbcsdk_mock.mock_request('POST', '/asset_groups/v1/orgs/test/groups/db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16/members', + on_post) + api = cbcsdk_mock.api + device = api.select(Device, 98765) + asset_group_1 = api.select(AssetGroup, "db416fa2-d5f2-4fb5-8a5e-cd89f6ecda16") + asset_group_2 = api.select(AssetGroup, "509f437f-6b9a-4b8e-996e-9183b35f9069") + device.remove_from_groups([asset_group_1, asset_group_2]) + + +def test_preview_add_policy_override(cbcsdk_mock): + """Tests the preview_add_policy_override_for_devices function""" + def on_post(url, body, **kwargs): + assert body == ADD_POLICY_OVERRIDE_REQUEST + return ADD_POLICY_OVERRIDE_RESPONSE + + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + cbcsdk_mock.mock_request("POST", "/policy-assignment/v1/orgs/test/asset-groups/preview", on_post) + api = cbcsdk_mock.api + device = api.select(Device, 98765) + preview = Device.preview_add_policy_override_for_devices(api, 1011, [device]) + assert len(preview) == 1 + assert preview[0].current_policy_id == 11200 + assert preview[0].new_policy_id == 1011 + assert preview[0].asset_count == 1 + + +def test_preview_remove_policy_override(cbcsdk_mock): + """Tests the preview_remove_policy_override and preview_remove_policy_override_for_devices functions""" + def on_post(url, body, **kwargs): + assert body == REMOVE_POLICY_OVERRIDE_REQUEST + return 
REMOVE_POLICY_OVERRIDE_RESPONSE + + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + cbcsdk_mock.mock_request("POST", "/policy-assignment/v1/orgs/test/asset-groups/preview", on_post) + api = cbcsdk_mock.api + device = api.select(Device, 98765) + preview = device.preview_remove_policy_override() + assert len(preview) == 1 + assert preview[0].current_policy_id == 11200 + assert preview[0].new_policy_id == 14760 + assert preview[0].asset_count == 1 + + +def test_device_tojson_inheritance(cbcsdk_mock): + """Testing Device tojson() with .select(Device, `device_id`)""" + cbcsdk_mock.mock_request("GET", "/appservices/v6/orgs/test/devices/98765", GET_DEVICE_RESP) + api = cbcsdk_mock.api + platform_device_select_with_id = api.select(Device, 98765) + platform_device_select_with_id.refresh() + assert platform_device_select_with_id.to_json() == GET_DEVICE_RESP diff --git a/src/tests/unit/platform/test_platform_process.py b/src/tests/unit/platform/test_platform_process.py index 5c16b80d8..e4d64807d 100644 --- a/src/tests/unit/platform/test_platform_process.py +++ b/src/tests/unit/platform/test_platform_process.py @@ -38,7 +38,9 @@ EXPECTED_PROCESS_RANGES_FACETS, GET_PROCESS_TREE_STR, GET_PROCESS_SUMMARY_STR, - GET_PROCESS_DETAILS_JOB_RESULTS_RESP_ZERO) + GET_PROCESS_DETAILS_JOB_RESULTS_RESP_ZERO, + PROCESS_OBFUSCATED_CMDLINE, + PROCESS_DEOBFUSCATE_CMDLINE_RESPONSE) log = logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, filename='log.txt') @@ -267,6 +269,56 @@ def test_summary_select_set_time_range_failures(cbcsdk_mock): assert 'Window must be a string.' in ex.value.message +def test_summary_query_timeout(cb): + """Tests the timeout setting on SummaryQuery.""" + query = cb.select(Process.Summary).where("process_guid:WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00") + assert query._timeout == 300000 + query.timeout(500) + assert query._timeout == 500 + query.timeout(999999) + assert query._timeout == 300000 + query.timeout(700) + assert query._timeout == 700 + query.timeout(0) + assert query._timeout == 300000 + + +def test_process_deobfuscate_cmdline(cbcsdk_mock): + """Test the deobfuscate_cmdline() method.""" + def on_validation_post(url, body, **kwargs): + assert body == {"query": "process_guid:WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-1d6225bbba74c00"} + return POST_PROCESS_VALIDATION_RESP + + def on_post_deobfuscate(url, body, **kwargs): + assert body == {"input": PROCESS_OBFUSCATED_CMDLINE} + return PROCESS_DEOBFUSCATE_CMDLINE_RESPONSE + + # mock the search validation + cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_validation", on_validation_post) + # mock the POST of a search + cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs", + POST_PROCESS_SEARCH_JOB_RESP) + # mock the GET to check search status + cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/" + "search_jobs/2c292717-80ed-4f0d-845f-779e09470920/results?start=0&rows=0"), + GET_PROCESS_SEARCH_JOB_RESP) + # mock the GET to get search results + cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/search_jobs/" + "2c292717-80ed-4f0d-845f-779e09470920/results?start=0&rows=500"), + GET_PROCESS_SEARCH_JOB_RESULTS_RESP) + cbcsdk_mock.mock_request("POST", "/tau/v2/orgs/test/reveal", on_post_deobfuscate) + + api = cbcsdk_mock.api + process = api.select(Process, 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00') + # poke the command line so we 
have something to deobfuscate + process._info['process_cmdline'] = [PROCESS_OBFUSCATED_CMDLINE] + deobfuscation = process.deobfuscate_cmdline() + assert len(deobfuscation['identities']) == 1 + assert len(deobfuscation['strings']) == 1 + assert deobfuscation['deobfuscated_code'] == \ + "Write-Output \"No matter how thin you slice it, it's still baloney.\"\n" + + def test_process_events(cbcsdk_mock): """Testing Process.events().""" def on_validation_post(url, body, **kwargs): @@ -508,6 +560,17 @@ def test_process_start_rows(cbcsdk_mock): assert process._batch_size == 102 +def test_process_search_set_rows_failure(cbcsdk_mock): + """Test what happens when we set rows to something nonsensical.""" + api = cbcsdk_mock.api + process = api.select(Process).where("event_type:modload").add_criteria("device_id", [1234]).add_exclusions( + "crossproc_effective_reputation", ["REP_WHITE"]) + with pytest.raises(ApiError): + process.set_rows('Bogus') + with pytest.raises(ApiError): + process.set_rows(65536) + + def test_process_sort(cbcsdk_mock): """Testing AsyncProcessQuery.sort_by().""" api = cbcsdk_mock.api diff --git a/src/tests/unit/platform/test_platform_query.py b/src/tests/unit/platform/test_platform_query.py index 6e5fd71d0..0b13f8623 100644 --- a/src/tests/unit/platform/test_platform_query.py +++ b/src/tests/unit/platform/test_platform_query.py @@ -194,9 +194,15 @@ def test_async_timeout(cbcsdk_mock): """Testing AsyncProcessQuery.timeout().""" api = cbcsdk_mock.api async_query = api.select(Process).where("process_guid:someguid") - assert async_query._timeout == 0 + assert async_query._timeout == 300000 async_query.timeout(msecs=500) assert async_query._timeout == 500 + async_query.timeout(msecs=999999) + assert async_query._timeout == 300000 + async_query.timeout(msecs=700) + assert async_query._timeout == 700 + async_query.timeout(msecs=0) + assert async_query._timeout == 300000 def test_async_submit(cbcsdk_mock): @@ -243,9 +249,15 @@ def test_async_facet_query_timeout(cbcsdk_mock): """Testing AsyncFacetQuery timeout()""" api = cbcsdk_mock.api facet_query = api.select(ProcessFacet).where("process_name:svchost.exe") - assert facet_query._timeout == 0 + assert facet_query._timeout == 300000 facet_query.timeout(5000) assert facet_query._timeout == 5000 + facet_query.timeout(999999) + assert facet_query._timeout == 300000 + facet_query.timeout(2000) + assert facet_query._timeout == 2000 + facet_query.timeout(0) + assert facet_query._timeout == 300000 def test_async_facet_limit(cbcsdk_mock): diff --git a/src/tests/unit/platform/test_policies.py b/src/tests/unit/platform/test_policies.py index 48703ff1a..d8938bd18 100644 --- a/src/tests/unit/platform/test_policies.py +++ b/src/tests/unit/platform/test_policies.py @@ -17,14 +17,23 @@ import random from contextlib import ExitStack as does_not_raise from cbc_sdk.rest_api import CBCloudAPI -from cbc_sdk.platform import Policy, PolicyRule, PolicyRuleConfig +from cbc_sdk.platform import Policy, PolicyRule, PolicyRuleConfig, DevicePolicyChangePreview +from cbc_sdk.platform.devices import DeviceSearchQuery from cbc_sdk.errors import ApiError, InvalidObjectError, ServerError from tests.unit.fixtures.CBCSDKMock import CBCSDKMock from tests.unit.fixtures.platform.mock_policies import (FULL_POLICY_1, SUMMARY_POLICY_1, SUMMARY_POLICY_2, SUMMARY_POLICY_3, OLD_POLICY_1, FULL_POLICY_2, OLD_POLICY_2, RULE_ADD_1, RULE_ADD_2, RULE_MODIFY_1, NEW_POLICY_CONSTRUCT_1, NEW_POLICY_RETURN_1, BASIC_CONFIG_TEMPLATE_RETURN, - BUILD_RULECONFIG_1) + BUILD_RULECONFIG_1, 
SET_XDR_COLLECTION_REQUEST, + SET_XDR_COLLECTION_RESPONSE, SET_AUTH_EVENT_COLLECTION_REQUEST, + SET_AUTH_EVENT_COLLECTION_RESPONSE, + SET_AUTH_EVENT_COLLECTION_RESPONSE_ERROR, + PREVIEW_POLICY_CHANGES_REQUEST1, + PREVIEW_POLICY_CHANGES_RESPONSE1, + PREVIEW_POLICY_CHANGES_REQUEST2, + PREVIEW_POLICY_CHANGES_RESPONSE2, FULL_POLICY_5, + ADD_POLICY_OVERRIDE_REQUEST, ADD_POLICY_OVERRIDE_RESPONSE) logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, filename='log.txt') @@ -72,6 +81,7 @@ def test_policy_compatibility_aliases_write(cb): policy.policy = copy.deepcopy(OLD_POLICY_2) policy.description = "Hoopy Frood" policy.name = "default - S1" + policy.position = 2 policy.priorityLevel = "MEDIUM" policy.version = 2 new_policy_data = copy.deepcopy(policy._info) @@ -540,3 +550,128 @@ def test_policy_builder_error_handling(cb): builder.set_on_demand_scan_schedule(["WEDNESDAY", "FRIDAY", "HELLDAY"], 0, 6) with pytest.raises(ApiError): builder.add_sensor_setting("LONG_RANGE", "true") + + +def test_set_xdr_collection(cbcsdk_mock): + """Tests the set_xdr_collection method.""" + def on_put(url, body, **kwargs): + assert body == SET_XDR_COLLECTION_REQUEST + return copy.deepcopy(SET_XDR_COLLECTION_RESPONSE) + + cbcsdk_mock.mock_request('PUT', '/policyservice/v1/orgs/test/policies/65536/rule_configs/data_collection', on_put) + api = cbcsdk_mock.api + policy = Policy(api, 65536, copy.deepcopy(FULL_POLICY_1), False, True) + policy.set_xdr_collection(True) + rconf = policy.data_collection_rule_configs['cc075469-8d1e-4056-84b6-0e6f437c4010'] + assert rconf.get_parameter("enable_network_data_collection") is True + + +def test_set_auth_event_collection(cbcsdk_mock): + """Tests the set_auth_event_collection method.""" + def on_put(url, body, **kwargs): + assert body == SET_AUTH_EVENT_COLLECTION_REQUEST + return copy.deepcopy(SET_AUTH_EVENT_COLLECTION_RESPONSE) + + cbcsdk_mock.mock_request('PUT', '/policyservice/v1/orgs/test/policies/65536/rule_configs/data_collection', on_put) + api = cbcsdk_mock.api + policy = Policy(api, 65536, copy.deepcopy(FULL_POLICY_1), False, True) + policy.set_auth_event_collection(False) + rconf = policy.data_collection_rule_configs['91c919da-fb90-4e63-9eac-506255b0a0d0'] + assert rconf.get_parameter("enable_auth_events") is False + + +def test_set_auth_event_collection_error_handling(cbcsdk_mock): + """Tests the error handling in set_auth_event_collection (actually in set_data_collection).""" + def on_put(url, body, **kwargs): + assert body == SET_AUTH_EVENT_COLLECTION_REQUEST + return copy.deepcopy(SET_AUTH_EVENT_COLLECTION_RESPONSE_ERROR) + + cbcsdk_mock.mock_request('PUT', '/policyservice/v1/orgs/test/policies/65536/rule_configs/data_collection', on_put) + api = cbcsdk_mock.api + policy = Policy(api, 65536, copy.deepcopy(FULL_POLICY_1), False, True) + with pytest.raises(ApiError) as err: + policy.set_auth_event_collection(False) + assert err.value.args[0] == "Test error" + + +@pytest.mark.parametrize("element", [ + {"id": 10240, "position": 1}, + [10240, 1], + (10240, 1) +]) +def test_preview_policy_rank_changes(cbcsdk_mock, element): + """Tests the preview_policy_rank_changes function on the Policy class.""" + def on_post(uri, body, **kwargs): + assert body == PREVIEW_POLICY_CHANGES_REQUEST1 + return PREVIEW_POLICY_CHANGES_RESPONSE1 + + cbcsdk_mock.mock_request('POST', '/policy-assignment/v1/orgs/test/policies/preview', on_post) + api = cbcsdk_mock.api + results = Policy.preview_policy_rank_changes(api, [element]) + assert len(results) == 2 + assert 
results[0].current_policy_id == 70722 + assert results[0].current_policy_position == 2 + assert results[0].new_policy_id == 10240 + assert results[0].new_policy_position == 1 + assert results[0].asset_count == 5 + assert results[1].current_policy_id == 142857 + assert results[1].current_policy_position == 1 + assert results[1].new_policy_id == 10240 + assert results[1].new_policy_position == 1 + assert results[1].asset_count == 2 + + +def test_preview_rank_change(cbcsdk_mock): + """Tests the preview_rank_change function on the policy class.""" + def on_post(uri, body, **kwargs): + assert body == PREVIEW_POLICY_CHANGES_REQUEST2 + return PREVIEW_POLICY_CHANGES_RESPONSE2 + + cbcsdk_mock.mock_request('GET', '/policyservice/v1/orgs/test/policies/65536', FULL_POLICY_1) + cbcsdk_mock.mock_request('POST', '/policy-assignment/v1/orgs/test/policies/preview', on_post) + api = cbcsdk_mock.api + policy = api.select(Policy, 65536) + results = policy.preview_rank_change(1) + assert results[0].current_policy_id == 1492 + assert results[0].current_policy_position == 2 + assert results[0].new_policy_id == 65536 + assert results[0].new_policy_position == 1 + assert results[0].asset_count == 5 + assert results[1].current_policy_id == 74656 + assert results[1].current_policy_position == 1 + assert results[1].new_policy_id == 65536 + assert results[1].new_policy_position == 1 + assert results[1].asset_count == 2 + + +def test_device_policy_change_preview_helper_methods(cbcsdk_mock): + """Tests the helper methods on the DevicePolicyChangePreview object.""" + cbcsdk_mock.mock_request('GET', '/policyservice/v1/orgs/test/policies/65536', FULL_POLICY_1) + cbcsdk_mock.mock_request('GET', '/policyservice/v1/orgs/test/policies/1492', FULL_POLICY_5) + api = cbcsdk_mock.api + preview = DevicePolicyChangePreview(api, PREVIEW_POLICY_CHANGES_RESPONSE2['preview'][0]) + policy = preview.current_policy + assert policy.id == 1492 + policy = preview.new_policy + assert policy.id == 65536 + query = preview.asset_query + assert isinstance(query, DeviceSearchQuery) + request = query._build_request(-1, -1) + assert request['query'] == "(-_exists_:ag_agg_key_dynamic AND ag_agg_key_manual:1790b51e683c8a20c2b2bbe3e41eacdc53e3632087bb5a3f2868588e99157b06 AND policy_override:false) OR (-_exists_:ag_agg_key_dynamic AND ag_agg_key_manual:aa8bd7e69c4ee45918bb126a17d90a1c8368b46f9bb5bf430cb0250c317cd1dc AND policy_override:false)" # noqa: E501 + + +def test_preview_add_policy_override(cbcsdk_mock): + """Tests the preview_add_policy_override method.""" + def on_post(url, body, **kwargs): + assert body == ADD_POLICY_OVERRIDE_REQUEST + return ADD_POLICY_OVERRIDE_RESPONSE + + cbcsdk_mock.mock_request('GET', '/policyservice/v1/orgs/test/policies/65536', FULL_POLICY_1) + cbcsdk_mock.mock_request("POST", "/policy-assignment/v1/orgs/test/asset-groups/preview", on_post) + api = cbcsdk_mock.api + policy = api.select(Policy, 65536) + results = policy.preview_add_policy_override([123, 456, 789]) + assert len(results) == 1 + assert results[0].current_policy_id == 11200 + assert results[0].new_policy_id == 65536 + assert results[0].asset_count == 3 diff --git a/src/tests/unit/test_credentials.py b/src/tests/unit/test_credentials.py index ed5a81627..17c377a79 100755 --- a/src/tests/unit/test_credentials.py +++ b/src/tests/unit/test_credentials.py @@ -31,6 +31,7 @@ def test_credential_default_values(): assert creds.proxy is None assert not creds.ignore_system_proxy assert creds.integration is None + assert creds.default_timeout == 300000 with 
pytest.raises(AttributeError): assert creds.notexist is None @@ -40,10 +41,11 @@ def test_credential_default_values(): CredentialValue.ORG_KEY: "A1B2C3D4", CredentialValue.SSL_VERIFY: False, CredentialValue.SSL_VERIFY_HOSTNAME: False, CredentialValue.SSL_CERT_FILE: "foo.certs", CredentialValue.SSL_FORCE_TLS_1_2: True, CredentialValue.PROXY: "proxy.example", - CredentialValue.IGNORE_SYSTEM_PROXY: True, CredentialValue.INTEGRATION: 'Bronski'}, ), + CredentialValue.IGNORE_SYSTEM_PROXY: True, CredentialValue.INTEGRATION: 'Bronski', + CredentialValue.DEFAULT_TIMEOUT: 200000}, ), ({"url": "http://example.com", "token": "ABCDEFGH", "org_key": "A1B2C3D4", "ssl_verify": "false", "ssl_verify_hostname": "no", "ssl_cert_file": "foo.certs", "ssl_force_tls_1_2": "1", - "proxy": "proxy.example", "ignore_system_proxy": "on", "integration": 'Bronski'}, ) + "proxy": "proxy.example", "ignore_system_proxy": "on", "integration": 'Bronski', "default_timeout": "200000"}, ) ]) def test_credential_dict_value_load(input_dict): """Test loading credentials from a dict, and also access through both attributes and get_value.""" @@ -58,6 +60,7 @@ def test_credential_dict_value_load(input_dict): assert creds.proxy == "proxy.example" assert creds.ignore_system_proxy assert creds.integration == 'Bronski' + assert creds.default_timeout == 200000 assert creds.get_value(CredentialValue.URL) == "http://example.com" assert creds.get_value(CredentialValue.TOKEN) == "ABCDEFGH" assert creds.get_value(CredentialValue.ORG_KEY) == "A1B2C3D4" @@ -68,11 +71,12 @@ def test_credential_dict_value_load(input_dict): assert creds.get_value(CredentialValue.PROXY) == "proxy.example" assert creds.get_value(CredentialValue.IGNORE_SYSTEM_PROXY) assert creds.get_value(CredentialValue.INTEGRATION) == 'Bronski' + assert creds.get_value(CredentialValue.DEFAULT_TIMEOUT) == 200000 def test_credential_partial_loads(): """Test that we can have credentials with some values from dict and some default.""" - init_dict = {"url": "http://example.com", "ssl_verify": 0} + init_dict = {"url": "http://example.com", "ssl_verify": 0, "default_timeout": 999999} creds = Credentials(init_dict) assert creds.url == "http://example.com" assert creds.token is None @@ -84,6 +88,7 @@ def test_credential_partial_loads(): assert creds.proxy is None assert not creds.ignore_system_proxy assert creds.integration is None + assert creds.default_timeout == 300000 def test_credential_boolean_parsing_failure(): @@ -98,10 +103,11 @@ def test_credential_boolean_parsing_failure(): CredentialValue.ORG_KEY: "A1B2C3D4", CredentialValue.SSL_VERIFY: False, CredentialValue.SSL_VERIFY_HOSTNAME: False, CredentialValue.SSL_CERT_FILE: "foo.certs", CredentialValue.SSL_FORCE_TLS_1_2: True, CredentialValue.PROXY: "proxy.example", - CredentialValue.IGNORE_SYSTEM_PROXY: True, CredentialValue.INTEGRATION: 'Bronski'}, ), + CredentialValue.IGNORE_SYSTEM_PROXY: True, CredentialValue.INTEGRATION: 'Bronski', + CredentialValue.DEFAULT_TIMEOUT: 200000}, ), ({"url": "http://example.com", "token": "ABCDEFGH", "org_key": "A1B2C3D4", "ssl_verify": "false", "ssl_verify_hostname": "no", "ssl_cert_file": "foo.certs", "ssl_force_tls_1_2": "1", - "proxy": "proxy.example", "ignore_system_proxy": "on", "integration": 'Bronski'}, ) + "proxy": "proxy.example", "ignore_system_proxy": "on", "integration": 'Bronski', "default_timeout": 200000}, ) ]) def test_credential_get_dict(input_dict): """Tests if we get the correct dictionary.""" @@ -115,6 +121,7 @@ def test_credential_get_dict(input_dict): assert 
creds["ssl_force_tls_1_2"] assert creds["proxy"] == "proxy.example" assert creds["ignore_system_proxy"] + assert creds["default_timeout"] == 200000 def test_get_token_api_key():