diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 987699bda..41e061589 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -12,7 +12,7 @@ jobs: strategy: matrix: - # 3.9+ is not currently supported due to https://github.com/freach/udatetime/issues/32 + # newer Python versions are not currently supported due to https://github.com/freach/udatetime/issues/32 - python-version: ["3.8", "3.9"] + python-version: ["3.8", "3.9", "3.10"] steps: - uses: actions/checkout@v5 - name: Set up Python ${{ matrix.python-version }} diff --git a/lib/apis/utils/__init__.py b/lib/apis/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/apis/utils/diff_table.py b/lib/apis/utils/diff_table.py new file mode 100644 index 000000000..4fa02739c --- /dev/null +++ b/lib/apis/utils/diff_table.py @@ -0,0 +1,74 @@ +import deepdiff +from deepdiff import extract + + +def diff_to_tabulate_table(obj1, obj2, excluded_paths=None): + """ + Formats the output from DeepDiff into a list of lists suitable for tabulate, + showing path, before, and after values. 
+ """ + diff_output = deepdiff.DeepDiff( + t1=obj1, + t2=obj2, + exclude_paths=excluded_paths, + threshold_to_diff_deeper=0, + ignore_order=True, + ) + + table_data = [] + + # Handle 'values_changed' + if "values_changed" in diff_output: + for path, change_info in diff_output["values_changed"].items(): + table_data.append( + [ + path, + change_info.get("old_value", "Not Present"), + change_info.get("new_value", "Not Present"), + ] + ) + + # Handle 'dictionary_item_added' + if "dictionary_item_added" in diff_output: + for path in diff_output["dictionary_item_added"]: + table_data.append( + [ + path, + "Not Present", # No old value for added items + extract(obj2, path), + ] + ) + + # Handle 'dictionary_item_removed' + if "dictionary_item_removed" in diff_output: + for path in diff_output["dictionary_item_removed"]: + table_data.append( + [ + path, + extract(obj1, path), + "Not Present", # No new value for removed items + ] + ) + + # Handle 'iterable_item_added' + if "iterable_item_added" in diff_output: + for path, value in diff_output["iterable_item_added"].items(): + table_data.append([path, "N/A", value]) + + # Handle 'iterable_item_removed' + if "iterable_item_removed" in diff_output: + for path, value in diff_output["iterable_item_removed"].items(): + table_data.append([path, value, "N/A"]) + + # Handle 'type_changes' + if "type_changes" in diff_output: + for path, change_info in diff_output["type_changes"].items(): + table_data.append( + [ + path, + f"'{change_info.get('old_value')}' (Type: {change_info.get('old_type').__name__})", + f"'{change_info.get('new_value')}' (Type: {change_info.get('new_type').__name__})", + ] + ) + + return table_data diff --git a/rules/alert.aggregate.metadata.mismatch.yaml b/rules/alert.aggregate.metadata.mismatch.yaml new file mode 100644 index 000000000..3678aaba5 --- /dev/null +++ b/rules/alert.aggregate.metadata.mismatch.yaml @@ -0,0 +1,16 @@ +--- +name: "alert.aggregate.metadata.mismatch" +pack: "stackstorm_openstack" 
+description: "Sends Jira Ticket for Aggregate metadata mismatch between prod and dev clouds" +enabled: true + +trigger: + type: "stackstorm_openstack.aggregate.metadata_mismatch" + +action: + ref: "jira.create_issue" + parameters: + summary: "[Stackstorm Alert]: {{trigger.aggregate_name}} Aggregate metadata mismatch between prod and dev clouds" + type: Problem + description: > + {{trigger.diff}} diff --git a/sensors/aggregate.aggregate_list.yaml b/sensors/aggregate.aggregate_list.yaml deleted file mode 100644 index 8a797fe8a..000000000 --- a/sensors/aggregate.aggregate_list.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -class_name: HostAggregateSensor -entry_point: src/host_aggregate_list_sensor.py -description: Detect changes in host aggregates -poll_interval: 86400 # 24 hours -enabled: false -trigger_types: - - name: "aggregate.aggregate_list" - description: "Triggers for aggregate list poll" - payload_schema: - type: "object" - properties: - dest_aggregates: - type: "array" - description: "List of destination (dev) aggregates" diff --git a/sensors/aggregate.metadata_mismatch.yaml b/sensors/aggregate.metadata_mismatch.yaml new file mode 100644 index 000000000..dc4f397cb --- /dev/null +++ b/sensors/aggregate.metadata_mismatch.yaml @@ -0,0 +1,15 @@ +--- +class_name: HostAggregateSensor +entry_point: src/host_aggregate_metadata_sensor.py +description: Detect changes in host aggregates +poll_interval: 604800 # 7 days +enabled: false +trigger_types: + - name: "aggregate.metadata_mismatch" + description: "Triggers for aggregate metadata mismatch" + payload_schema: + type: "object" + properties: + diff: + type: "string" + description: "diff between aggregate" diff --git a/sensors/src/host_aggregate_list_sensor.py b/sensors/src/host_aggregate_list_sensor.py deleted file mode 100644 index 486c2c2b4..000000000 --- a/sensors/src/host_aggregate_list_sensor.py +++ /dev/null @@ -1,73 +0,0 @@ -import json - -from apis.openstack_api.openstack_connection import OpenstackConnection 
-from st2reactor.sensor.base import PollingSensor - - -class HostAggregateSensor(PollingSensor): - """ - * self.sensor_service - - provides utilities like - get_logger() for writing to logs. - dispatch() for dispatching triggers into the system. - * self._config - - contains configuration that was specified as - config.yaml in the pack. - * self._poll_interval - - indicates the interval between two successive poll() calls. - """ - - def __init__(self, sensor_service, config=None, poll_interval=None): - super().__init__( - sensor_service=sensor_service, config=config, poll_interval=poll_interval - ) - self._log = self._sensor_service.get_logger(__name__) - self.dest_cloud_account = self.config["sensor_dest_cloud"] - - def setup(self): - """ - Stub - """ - - def poll(self): - """ - Polls the dev cloud host aggregates and dispatches a payload containing - a list of aggregates. - """ - with OpenstackConnection(self.dest_cloud_account) as conn: - self._log.info(f"Destination Cloud: {self.dest_cloud_account}") - self._log.info("Polling for destination aggregates.") - - # Returns a generator, consume it into a list and convert each item to a dict - dest_aggregates = [ - json.dumps(agg.to_dict()) for agg in conn.compute.aggregates() - ] - - self._log.info("Dispatching trigger for aggregate list.") - - payload = {"dest_aggregates": dest_aggregates} - - self.sensor_service.dispatch( - trigger="stackstorm_openstack.aggregate.aggregate_list", - payload=payload, - ) - - def cleanup(self): - """ - Stub - """ - - def add_trigger(self, trigger): - """ - Stub - """ - - def update_trigger(self, trigger): - """ - Stub - """ - - def remove_trigger(self, trigger): - """ - Stub - """ diff --git a/sensors/src/host_aggregate_metadata_sensor.py b/sensors/src/host_aggregate_metadata_sensor.py new file mode 100644 index 000000000..7f1168386 --- /dev/null +++ b/sensors/src/host_aggregate_metadata_sensor.py @@ -0,0 +1,124 @@ +import tabulate + +from apis.openstack_api.openstack_connection 
import OpenstackConnection +from apis.utils.diff_table import diff_to_tabulate_table +from st2reactor.sensor.base import PollingSensor + + +class HostAggregateSensor(PollingSensor): + """ + * self.sensor_service + - provides utilities like + get_logger() for writing to logs. + dispatch() for dispatching triggers into the system. + * self._config + - contains configuration that was specified as + config.yaml in the pack. + * self._poll_interval + - indicates the interval between two successive poll() calls. + """ + + def __init__(self, sensor_service, config=None, poll_interval=None): + super().__init__( + sensor_service=sensor_service, config=config, poll_interval=poll_interval + ) + self._log = self._sensor_service.get_logger(__name__) + self.source_cloud = self.config["sensor_source_cloud"] + self.target_cloud = self.config["sensor_dest_cloud"] + + def setup(self): + """ + Stub + """ + + def poll(self): + """ + Compares host aggregate metadata between the source and target clouds + and dispatches a mismatch trigger for each aggregate that differs. 
+ """ + with OpenstackConnection(self.source_cloud) as source_conn, OpenstackConnection( + self.target_cloud + ) as target_conn: + + source_aggregates = { + agg.name: agg for agg in source_conn.compute.aggregates() + } + target_aggregates = { + agg.name: agg for agg in target_conn.compute.aggregates() + } + + self._log.info( + "Compare source (%s) and target (%s) host aggregate metadata", self.source_cloud, self.target_cloud, + ) + for aggregate_name, source_agg in source_aggregates.items(): + target_agg = target_aggregates.get(aggregate_name) + + if not target_agg: + self._log.info( + "aggregate %s doesn't exist in %s cloud", + aggregate_name, + self.target_cloud, + ) + continue + + diff = diff_to_tabulate_table( + obj1=source_agg, + obj2=target_agg, + excluded_paths=[ + "root['hosts']", + "root['created_at']", + "root['updated_at']", + "root['uuid']", + "root['id']", + "root['location']", + ], + ) + + if diff: + + self._log.info( + "aggregate metadata mismatch between source (%s) and target (%s): %s", + self.source_cloud, + self.target_cloud, + aggregate_name, + ) + + headers = [ + "Path", + self.source_cloud, + self.target_cloud, + ] + + payload = { + "aggregate_name": source_agg.name, + "diff": tabulate.tabulate( + diff, + headers=headers, + tablefmt="jira", + ), + } + + self.sensor_service.dispatch( + trigger="stackstorm_openstack.aggregate.metadata_mismatch", + payload=payload, + ) + + def cleanup(self): + """ + Stub + """ + + def add_trigger(self, trigger): + """ + Stub + """ + + def update_trigger(self, trigger): + """ + Stub + """ + + def remove_trigger(self, trigger): + """ + Stub + """ diff --git a/tests/lib/apis/utils/test_diff_table.py b/tests/lib/apis/utils/test_diff_table.py new file mode 100644 index 000000000..a300ce253 --- /dev/null +++ b/tests/lib/apis/utils/test_diff_table.py @@ -0,0 +1,165 @@ +from unittest.mock import patch +import pytest + +from apis.utils.diff_table import diff_to_tabulate_table + + +@pytest.fixture(name="mock_object1") +def mock_object1_fixture(): + return { + 
"id": 1, + "metadata": {"cores": 6, "disk": 50}, + "tags": ["tag3"], + "name": "test1", + } + + +@pytest.fixture(name="mock_object2") +def mock_object2_fixture(): + return { + "id": 2, + "metadata": {"cores": 4, "gpus": 10}, + "tags": ["tag1", "tag2"], + "name": 123, + } + + +def test_diff(mock_object1, mock_object2): + with patch("deepdiff.DeepDiff") as mock_deepdiff: + diff_to_tabulate_table(mock_object1, mock_object2) + mock_deepdiff.assert_called_once_with( + t1=mock_object1, + t2=mock_object2, + exclude_paths=None, + threshold_to_diff_deeper=0, + ignore_order=True, + ) + + +def test_diff_with_exclude_paths(mock_object1, mock_object2): + with patch("deepdiff.DeepDiff") as mock_deepdiff: + diff_to_tabulate_table(mock_object1, mock_object2, ["root['id']"]) + mock_deepdiff.assert_called_once_with( + t1=mock_object1, + t2=mock_object2, + exclude_paths=["root['id']"], + threshold_to_diff_deeper=0, + ignore_order=True, + ) + + +def test_no_diff(mock_object1, mock_object2): + with patch("deepdiff.DeepDiff") as mock_deepdiff: + mock_deepdiff.return_value = {} + res = diff_to_tabulate_table(mock_object1, mock_object2) + mock_deepdiff.assert_called_once_with( + t1=mock_object1, + t2=mock_object2, + exclude_paths=None, + threshold_to_diff_deeper=0, + ignore_order=True, + ) + assert not res + + +def test_diff_with_values_changed(mock_object1, mock_object2): + with patch("deepdiff.DeepDiff") as mock_deepdiff: + mock_deepdiff.return_value = { + "values_changed": {"root['id']": {"new_value": 2, "old_value": 1}} + } + res = diff_to_tabulate_table(mock_object1, mock_object2) + mock_deepdiff.assert_called_once_with( + t1=mock_object1, + t2=mock_object2, + exclude_paths=None, + threshold_to_diff_deeper=0, + ignore_order=True, + ) + assert res == [["root['id']", 1, 2]] + + +def test_diff_with_dictionary_item_added(mock_object1, mock_object2): + with patch("deepdiff.DeepDiff") as mock_deepdiff: + mock_deepdiff.return_value = { + "dictionary_item_added": ["root['metadata']['gpus']"] 
+ } + res = diff_to_tabulate_table(mock_object1, mock_object2) + mock_deepdiff.assert_called_once_with( + t1=mock_object1, + t2=mock_object2, + exclude_paths=None, + threshold_to_diff_deeper=0, + ignore_order=True, + ) + assert res == [["root['metadata']['gpus']", "Not Present", 10]] + + +def test_diff_with_dictionary_item_removed(mock_object1, mock_object2): + with patch("deepdiff.DeepDiff") as mock_deepdiff: + mock_deepdiff.return_value = { + "dictionary_item_removed": ["root['metadata']['disk']"] + } + res = diff_to_tabulate_table(mock_object1, mock_object2) + mock_deepdiff.assert_called_once_with( + t1=mock_object1, + t2=mock_object2, + exclude_paths=None, + threshold_to_diff_deeper=0, + ignore_order=True, + ) + assert res == [["root['metadata']['disk']", 50, "Not Present"]] + + +def test_diff_with_iterable_item_added(mock_object1, mock_object2): + with patch("deepdiff.DeepDiff") as mock_deepdiff: + mock_deepdiff.return_value = { + "iterable_item_added": {"root['tags'][1]": "tag2"} + } + res = diff_to_tabulate_table(mock_object1, mock_object2) + mock_deepdiff.assert_called_once_with( + t1=mock_object1, + t2=mock_object2, + exclude_paths=None, + threshold_to_diff_deeper=0, + ignore_order=True, + ) + assert res == [["root['tags'][1]", "N/A", "tag2"]] + + +def test_diff_with_iterable_item_removed(mock_object1, mock_object2): + with patch("deepdiff.DeepDiff") as mock_deepdiff: + mock_deepdiff.return_value = { + "iterable_item_removed": {"root['tags'][1]": "tag2"} + } + res = diff_to_tabulate_table(mock_object1, mock_object2) + mock_deepdiff.assert_called_once_with( + t1=mock_object1, + t2=mock_object2, + exclude_paths=None, + threshold_to_diff_deeper=0, + ignore_order=True, + ) + assert res == [["root['tags'][1]", "tag2", "N/A"]] + + +def test_diff_with_type_changes(mock_object1, mock_object2): + with patch("deepdiff.DeepDiff") as mock_deepdiff: + mock_deepdiff.return_value = { + "type_changes": { + "root['name']": { + "old_type": str, + "new_type": int, + 
"old_value": "test1", + "new_value": 123, + } + } + } + res = diff_to_tabulate_table(mock_object1, mock_object2) + mock_deepdiff.assert_called_once_with( + t1=mock_object1, + t2=mock_object2, + exclude_paths=None, + threshold_to_diff_deeper=0, + ignore_order=True, + ) + assert res == [["root['name']", "'test1' (Type: str)", "'123' (Type: int)"]] diff --git a/tests/sensors/test_host_aggregate_list_sensor.py b/tests/sensors/test_host_aggregate_list_sensor.py deleted file mode 100644 index 07172afa2..000000000 --- a/tests/sensors/test_host_aggregate_list_sensor.py +++ /dev/null @@ -1,49 +0,0 @@ -import json -from unittest.mock import MagicMock, patch - -import pytest - -from sensors.src.host_aggregate_list_sensor import HostAggregateSensor - - -@pytest.fixture(name="sensor") -def aggregate_sensor_fixture(): - """ - Fixture for sensor config. - """ - return HostAggregateSensor( - sensor_service=MagicMock(), - config={ - "sensor_dest_cloud": "dev", - }, - poll_interval=10, - ) - - -@patch("sensors.src.host_aggregate_list_sensor.OpenstackConnection") -def test_poll(mock_openstack_connection, sensor): - """ - Test main function of sensor, polling the dev cloud aggregates and their properties. 
- """ - mock_conn = MagicMock() - mock_openstack_connection.return_value.__enter__.return_value = mock_conn - - mock_aggregate = MagicMock() - mock_aggregate.to_dict.return_value = { - "name": "aggregate1", - "availability_zone": "nova", - } - mock_conn.compute.aggregates.return_value = [mock_aggregate] - - sensor.poll() - - mock_conn.compute.aggregates.assert_called_once_with() - - expected_payload = { - "dest_aggregates": [json.dumps(mock_aggregate.to_dict.return_value)] - } - - sensor.sensor_service.dispatch.assert_called_once_with( - trigger="stackstorm_openstack.aggregate.aggregate_list", - payload=expected_payload, - ) diff --git a/tests/sensors/test_host_aggregate_metadata_sensor.py b/tests/sensors/test_host_aggregate_metadata_sensor.py new file mode 100644 index 000000000..cadd8c503 --- /dev/null +++ b/tests/sensors/test_host_aggregate_metadata_sensor.py @@ -0,0 +1,129 @@ +from unittest.mock import MagicMock, patch + +import pytest + +from sensors.src.host_aggregate_metadata_sensor import HostAggregateSensor + + +@pytest.fixture(name="sensor") +def aggregate_sensor_fixture(): + """ + Fixture for sensor config. + """ + return HostAggregateSensor( + sensor_service=MagicMock(), + config={ + "sensor_source_cloud": "dev", + "sensor_dest_cloud": "prod", + }, + poll_interval=10, + ) + + +@patch("sensors.src.host_aggregate_metadata_sensor.tabulate") +@patch("sensors.src.host_aggregate_metadata_sensor.diff_to_tabulate_table") +@patch("sensors.src.host_aggregate_metadata_sensor.OpenstackConnection") +def test_poll_mismatch( + mock_openstack_connection, mock_diff_to_tabulate_table, mock_tabulate, sensor +): + """ + Test main function of sensor, polling the dev cloud aggregates and their properties. 
+ """ + mock_source_conn = MagicMock() + mock_target_conn = MagicMock() + + mock_openstack_connection.return_value.__enter__.side_effect = [ + mock_source_conn, + mock_target_conn, + ] + + mock_source_aggregate = MagicMock() + mock_source_aggregate.name = "aggregate1" + mock_source_aggregate.availability_zone = "nova" + + mock_target_aggregate = MagicMock() + mock_target_aggregate.name = "aggregate1" + mock_target_aggregate.availability_zone = "ceph" + + mock_source_conn.compute.aggregates.return_value = [mock_source_aggregate] + mock_target_conn.compute.aggregates.return_value = [mock_target_aggregate] + mock_diff = mock_diff_to_tabulate_table.return_value + + sensor.poll() + + mock_source_conn.compute.aggregates.assert_called_once_with() + mock_target_conn.compute.aggregates.assert_called_once_with() + + expected_payload = { + "aggregate_name": mock_source_aggregate.name, + "diff": mock_tabulate.tabulate( + mock_diff, + headers=["Path", "dev", "prod"], + tablefmt="jira", + ), + } + + sensor.sensor_service.dispatch.assert_called_once_with( + trigger="stackstorm_openstack.aggregate.metadata_mismatch", + payload=expected_payload, + ) + + +@patch("sensors.src.host_aggregate_metadata_sensor.OpenstackConnection") +def test_poll_match(mock_openstack_connection, sensor): + """ + Test main function of sensor, polling the dev cloud aggregates and their properties. 
+ """ + mock_source_conn = MagicMock() + mock_target_conn = MagicMock() + + mock_openstack_connection.return_value.__enter__.side_effect = [ + mock_source_conn, + mock_target_conn, + ] + + mock_source_aggregate = MagicMock() + mock_source_aggregate.name = "aggregate1" + mock_source_aggregate.availability_zone = "nova" + + mock_source_conn.compute.aggregates.return_value = [mock_source_aggregate] + mock_target_conn.compute.aggregates.return_value = [mock_source_aggregate] + + sensor.poll() + + mock_source_conn.compute.aggregates.assert_called_once_with() + mock_target_conn.compute.aggregates.assert_called_once_with() + + sensor.sensor_service.dispatch.assert_not_called() + + +@patch("sensors.src.host_aggregate_metadata_sensor.OpenstackConnection") +def test_poll_not_exist_in_target(mock_openstack_connection, sensor): + """ + Test main function of sensor, polling the dev cloud aggregates and their properties. + """ + mock_source_conn = MagicMock() + mock_target_conn = MagicMock() + + mock_openstack_connection.return_value.__enter__.side_effect = [ + mock_source_conn, + mock_target_conn, + ] + + mock_source_aggregate = MagicMock() + mock_source_aggregate.name = "aggregate1" + mock_source_aggregate.availability_zone = "nova" + + mock_target_aggregate = MagicMock() + mock_target_aggregate.name = "aggregate2" + mock_target_aggregate.availability_zone = "ceph" + + mock_source_conn.compute.aggregates.return_value = [mock_source_aggregate] + mock_target_conn.compute.aggregates.return_value = [mock_target_aggregate] + + sensor.poll() + + mock_source_conn.compute.aggregates.assert_called_once_with() + mock_target_conn.compute.aggregates.assert_called_once_with() + + sensor.sensor_service.dispatch.assert_not_called()