From 2e9fd01e7a5260f02db4df5660be25b3fabf8e6b Mon Sep 17 00:00:00 2001
From: Balazs Gibizer
Date: Tue, 23 Apr 2019 13:30:35 +0200
Subject: [PATCH] re-calculate provider mapping during migration

Nova intentionally does not persist the resource request of the neutron
ports. Therefore during migration nova needs to query neutron about the
resource requests to include them in the allocation_candidates query
sent to placement during scheduling. Also, when the allocation is made
by the scheduler, nova needs to re-calculate the request group -
resource provider mapping. A subsequent patch will use this mapping to
update the binding profile when the port is bound to the destination
host of the migration.

blueprint: support-move-ops-with-qos-ports
Change-Id: I8e5a0480c81ba548bc1f50a8098eabac52b11453
---
 nova/conductor/tasks/migrate.py               | 16 +++++++++++++--
 nova/tests/functional/test_servers.py         | 14 ++++++++++++-
 .../unit/conductor/tasks/test_migrate.py      | 20 +++++++++++++++++--
 3 files changed, 45 insertions(+), 5 deletions(-)

diff --git a/nova/conductor/tasks/migrate.py b/nova/conductor/tasks/migrate.py
index f4803e44be0..636233415b8 100644
--- a/nova/conductor/tasks/migrate.py
+++ b/nova/conductor/tasks/migrate.py
@@ -203,8 +203,12 @@ def _execute(self):
         # is not forced to be the original host
         self.request_spec.reset_forced_destinations()
 
-        # TODO(gibi): We need to make sure that the requested_resources field
-        # is re calculated based on neutron ports.
+        port_res_req = self.network_api.get_requested_resource_for_instance(
+            self.context, self.instance.uuid)
+        # NOTE(gibi): When cyborg or another module wants to handle similar
+        # non-nova resources then here we have to collect all the external
+        # resource requests in a single list and add them to the RequestSpec.
+        self.request_spec.requested_resources = port_res_req
 
         self._restrict_request_spec_to_cell(legacy_props)
 
@@ -242,6 +246,10 @@ def _execute(self):
             # The selected host is the first item in the list, with the
             # alternates being the remainder of the list.
             selection, self.host_list = selection_list[0], selection_list[1:]
+
+            scheduler_utils.fill_provider_mapping(
+                self.context, self.reportclient, self.request_spec, selection)
+
         else:
             # This is a reschedule that will use the supplied alternate hosts
             # in the host_list as destinations. Since the resources on these
@@ -264,6 +272,10 @@ def _execute(self):
                         elevated, self.reportclient, self.request_spec,
                         self.instance.uuid, alloc_req,
                         selection.allocation_request_version)
+                    if host_available:
+                        scheduler_utils.fill_provider_mapping(
+                            self.context, self.reportclient, self.request_spec,
+                            selection)
                 else:
                     # Some deployments use different schedulers that do not
                     # use Placement, so they will not have an
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index fbb19c855e7..96d9036fd24 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -3170,7 +3170,10 @@ def test_shelve_unshelve_after_server_created_with_host(self):
 
         self.assertEqual(dest_hostname, inst_dest_host)
 
-    def _test_resize_reschedule_uses_host_lists(self, fails, num_alts=None):
+    @mock.patch.object(utils, 'fill_provider_mapping',
+                       wraps=utils.fill_provider_mapping)
+    def _test_resize_reschedule_uses_host_lists(self, mock_fill_provider_map,
+                                                fails, num_alts=None):
         """Test that when a resize attempt fails, the retry comes from the
         supplied host_list, and does not call the scheduler.
         """
@@ -3226,6 +3229,15 @@ def fake_prep_resize(*args, **kwargs):
         data = {"resize": {"flavorRef": self.flavor2["id"]}}
         self.api.post_server_action(server_uuid, data)
 
+        # fill_provider_mapping should have been called once for the initial
+        # build, once for the resize scheduling to the primary host and then
+        # once per reschedule.
+        expected_fill_count = 2
+        if num_alts > 1:
+            expected_fill_count += self.num_fails - 1
+        self.assertGreaterEqual(mock_fill_provider_map.call_count,
+                                expected_fill_count)
+
         if num_alts < fails:
             # We will run out of alternates before populate_retry will
             # raise a MaxRetriesExceeded exception, so the migration will
diff --git a/nova/tests/unit/conductor/tasks/test_migrate.py b/nova/tests/unit/conductor/tasks/test_migrate.py
index 0e6aae11dbc..f9da5b3abe4 100644
--- a/nova/tests/unit/conductor/tasks/test_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_migrate.py
@@ -88,6 +88,9 @@ def _test_execute(self, prep_resize_mock, sel_dest_mock, sig_mock, az_mock,
         sel_dest_mock.return_value = self.host_lists
         az_mock.return_value = 'myaz'
         gbf_mock.return_value = objects.MigrationList()
+        mock_get_resources = \
+            self.mock_network_api.get_requested_resource_for_instance
+        mock_get_resources.return_value = []
 
         if requested_destination:
             self.request_spec.requested_destination = objects.Destination(
@@ -112,7 +115,12 @@ def set_migration_uuid(*a, **k):
         # place to find it via self.migration.
         cn_mock.side_effect = set_migration_uuid
 
-        task.execute()
+        selection = self.host_lists[0][0]
+        with mock.patch.object(scheduler_utils,
+                               'fill_provider_mapping') as fill_mock:
+            task.execute()
+        fill_mock.assert_called_once_with(
+            task.context, task.reportclient, task.request_spec, selection)
 
         self.ensure_network_metadata_mock.assert_called_once_with(
             self.instance)
@@ -122,7 +130,6 @@ def set_migration_uuid(*a, **k):
         task.query_client.select_destinations.assert_called_once_with(
             self.context, self.request_spec, [self.instance.uuid],
             return_objects=True, return_alternates=True)
-        selection = self.host_lists[0][0]
         prep_resize_mock.assert_called_once_with(
             self.context, self.instance, self.request_spec.image,
             self.flavor, selection.service_host, task._migration,
@@ -152,6 +159,10 @@ def set_migration_uuid(*a, **k):
             self.assertIn('cell', self.request_spec.requested_destination)
             self.assertIsNotNone(self.request_spec.requested_destination.cell)
 
+        mock_get_resources.assert_called_once_with(
+            self.context, self.instance.uuid)
+        self.assertEqual([], self.request_spec.requested_resources)
+
     def test_execute(self):
         self._test_execute()
 
@@ -182,6 +193,9 @@ def test_execute_rollback(self, prep_resize_mock, sel_dest_mock, sig_mock,
         task = self._generate_task()
         gmv_mock.return_value = 23
         mock_gbf.return_value = objects.MigrationList()
+        mock_get_resources = \
+            self.mock_network_api.get_requested_resource_for_instance
+        mock_get_resources.return_value = []
 
         # We just need this hook point to set a uuid on the
         # migration before we use it for teardown
@@ -203,6 +217,8 @@ def set_migration_uuid(*a, **k):
         self.assertEqual('error', task._migration.status)
         mock_ra.assert_called_once_with(task.context, task._source_cn,
                                         task.instance, task._migration)
+        mock_get_resources.assert_called_once_with(
+            self.context, self.instance.uuid)
 
 
 class MigrationTaskAllocationUtils(test.NoDBTestCase):