Skip to content

Commit c163205

Browse files
author
Balazs Gibizer
committed
Remove compute service level check for qos ops
To support move operations with qos ports both the source and the destination compute hosts need to be on Ussuri level. We have service level checks implemented in Ussuri. In Victoria we could remove those checks as nova only supports compatibility between N and N-1 computes. But we kept them there just for extra safety. In the meantime, we codified [1] the rule that nova does not support N-2 computes any more. So in Wallaby we can assume that the oldest compute is already on Victoria (Ussuri would be enough too). So this patch removes the unnecessary service level checks and related test cases. [1] Ie15ec8299ae52ae8f5334d591ed3944e9585cf71 Change-Id: I14177e35b9d6d27d49e092604bf0f288cd05f57e
1 parent be752b8 commit c163205

File tree

16 files changed

+9
-1138
lines changed

16 files changed

+9
-1138
lines changed

nova/api/openstack/common.py

Lines changed: 0 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -28,12 +28,9 @@
2828
from nova.compute import task_states
2929
from nova.compute import vm_states
3030
import nova.conf
31-
from nova import context as nova_context
3231
from nova import exception
3332
from nova.i18n import _
34-
from nova.network import constants
3533
from nova import objects
36-
from nova.objects import service
3734
from nova import quota
3835
from nova import utils
3936

@@ -557,35 +554,3 @@ def supports_port_resource_request(req):
557554
port resource request support, False otherwise.
558555
"""
559556
return api_version_request.is_supported(req, '2.72')
560-
561-
562-
def supports_port_resource_request_during_move():
563-
"""Check to see if the global compute service version is high enough to
564-
support port resource request during move operation.
565-
566-
:returns: True if the compute service version is high enough for
567-
port resource request move support, False otherwise.
568-
"""
569-
return service.get_minimum_version_all_cells(
570-
nova_context.get_admin_context(), ['nova-compute']) >= 49
571-
572-
573-
def instance_has_port_with_resource_request(instance_uuid, network_api):
574-
575-
# TODO(gibi): Use instance.info_cache to see if there is VIFs with
576-
# allocation key in the profile. If there is no such VIF for an instance
577-
# and the instance is not shelve offloaded then we can be sure that the
578-
# instance has no port with resource request. If the instance is shelve
579-
# offloaded then we still have to hit neutron.
580-
search_opts = {'device_id': instance_uuid,
581-
'fields': [constants.RESOURCE_REQUEST]}
582-
# NOTE(gibi): We need to use an admin context to query neutron ports as
583-
# neutron does not fill the resource_request field in the port response if
584-
# we query with a non admin context.
585-
admin_context = nova_context.get_admin_context()
586-
ports = network_api.list_ports(
587-
admin_context, **search_opts).get('ports', [])
588-
for port in ports:
589-
if port.get(constants.RESOURCE_REQUEST):
590-
return True
591-
return False

nova/api/openstack/compute/evacuate.py

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -120,22 +120,6 @@ def _evacuate(self, req, id, body):
120120
msg = _("The target host can't be the same one.")
121121
raise exc.HTTPBadRequest(explanation=msg)
122122

123-
# We could potentially move this check to conductor and avoid the
124-
# extra API call to neutron when we support move operations with ports
125-
# having resource requests.
126-
if (common.instance_has_port_with_resource_request(
127-
instance.uuid, self.network_api) and not
128-
common.supports_port_resource_request_during_move()):
129-
LOG.warning("The evacuate action on a server with ports "
130-
"having resource requests, like a port with a QoS "
131-
"minimum bandwidth policy, is not supported until "
132-
"every nova-compute is upgraded to Ussuri")
133-
msg = _("The evacuate action on a server with ports having "
134-
"resource requests, like a port with a QoS minimum "
135-
"bandwidth policy, is not supported by this cluster right "
136-
"now")
137-
raise exc.HTTPBadRequest(explanation=msg)
138-
139123
try:
140124
self.compute_api.evacuate(context, instance, host,
141125
on_shared_storage, password, force)

nova/api/openstack/compute/migrate_server.py

Lines changed: 0 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -27,13 +27,10 @@
2727
from nova import exception
2828
from nova.i18n import _
2929
from nova.network import neutron
30-
from nova import objects
3130
from nova.policies import migrate_server as ms_policies
3231

3332
LOG = logging.getLogger(__name__)
3433

35-
MIN_COMPUTE_MOVE_BANDWIDTH = 39
36-
3734

3835
class MigrateServerController(wsgi.Controller):
3936
def __init__(self):
@@ -59,19 +56,6 @@ def _migrate(self, req, id, body):
5956
body['migrate'] is not None):
6057
host_name = body['migrate'].get('host')
6158

62-
if common.instance_has_port_with_resource_request(
63-
instance.uuid, self.network_api):
64-
# TODO(gibi): Remove when nova only supports compute newer than
65-
# Train
66-
source_service = objects.Service.get_by_host_and_binary(
67-
context, instance.host, 'nova-compute')
68-
if source_service.version < MIN_COMPUTE_MOVE_BANDWIDTH:
69-
msg = _("The migrate action on a server with ports having "
70-
"resource requests, like a port with a QoS "
71-
"minimum bandwidth policy, is not yet supported "
72-
"on the source compute")
73-
raise exc.HTTPConflict(explanation=msg)
74-
7559
try:
7660
self.compute_api.resize(req.environ['nova.context'], instance,
7761
host_name=host_name)
@@ -134,22 +118,6 @@ def _migrate_live(self, req, id, body):
134118
disk_over_commit = strutils.bool_from_string(disk_over_commit,
135119
strict=True)
136120

137-
# We could potentially move this check to conductor and avoid the
138-
# extra API call to neutron when we support move operations with ports
139-
# having resource requests.
140-
if (common.instance_has_port_with_resource_request(
141-
instance.uuid, self.network_api) and not
142-
common.supports_port_resource_request_during_move()):
143-
LOG.warning("The os-migrateLive action on a server with ports "
144-
"having resource requests, like a port with a QoS "
145-
"minimum bandwidth policy, is not supported until "
146-
"every nova-compute is upgraded to Ussuri")
147-
msg = _("The os-migrateLive action on a server with ports having "
148-
"resource requests, like a port with a QoS minimum "
149-
"bandwidth policy, is not supported by this cluster right "
150-
"now")
151-
raise exc.HTTPBadRequest(explanation=msg)
152-
153121
try:
154122
self.compute_api.live_migrate(context, instance, block_migration,
155123
disk_over_commit, host, force,

nova/api/openstack/compute/servers.py

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -92,8 +92,6 @@
9292
exception.InvalidMixedInstanceDedicatedMask,
9393
)
9494

95-
MIN_COMPUTE_MOVE_BANDWIDTH = 39
96-
9795

9896
class ServersController(wsgi.Controller):
9997
"""The Server API base controller class for the OpenStack API."""
@@ -946,18 +944,6 @@ def _resize(self, req, instance_id, flavor_id, auto_disk_config=None):
946944
target={'user_id': instance.user_id,
947945
'project_id': instance.project_id})
948946

949-
if common.instance_has_port_with_resource_request(
950-
instance_id, self.network_api):
951-
# TODO(gibi): Remove when nova only supports compute newer than
952-
# Train
953-
source_service = objects.Service.get_by_host_and_binary(
954-
context, instance.host, 'nova-compute')
955-
if source_service.version < MIN_COMPUTE_MOVE_BANDWIDTH:
956-
msg = _("The resize action on a server with ports having "
957-
"resource requests, like a port with a QoS "
958-
"minimum bandwidth policy, is not yet supported.")
959-
raise exc.HTTPConflict(explanation=msg)
960-
961947
try:
962948
self.compute_api.resize(context, instance, flavor_id,
963949
auto_disk_config=auto_disk_config)

nova/api/openstack/compute/shelve.py

Lines changed: 0 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,7 @@
2323
from nova.api.openstack import wsgi
2424
from nova.api import validation
2525
from nova.compute import api as compute
26-
from nova.compute import vm_states
2726
from nova import exception
28-
from nova.i18n import _
2927
from nova.network import neutron
3028
from nova.policies import shelve as shelve_policies
3129

@@ -99,23 +97,6 @@ def _unshelve(self, req, id, body):
9997
if support_az and unshelve_dict:
10098
new_az = unshelve_dict['availability_zone']
10199

102-
# We could potentially move this check to conductor and avoid the
103-
# extra API call to neutron when we support move operations with ports
104-
# having resource requests.
105-
if (instance.vm_state == vm_states.SHELVED_OFFLOADED and
106-
common.instance_has_port_with_resource_request(
107-
instance.uuid, self.network_api) and
108-
not common.supports_port_resource_request_during_move()):
109-
LOG.warning("The unshelve action on a server with ports having "
110-
"resource requests, like a port with a QoS minimum "
111-
"bandwidth policy, is not supported until every "
112-
"nova-compute is upgraded to Ussuri")
113-
msg = _("The unshelve action on a server with ports having "
114-
"resource requests, like a port with a QoS minimum "
115-
"bandwidth policy, is not supported by this cluster right "
116-
"now")
117-
raise exc.HTTPBadRequest(explanation=msg)
118-
119100
try:
120101
self.compute_api.unshelve(context, instance, new_az=new_az)
121102
except (exception.InstanceIsLocked,

nova/compute/rpcapi.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -934,13 +934,6 @@ def pre_live_migration(self, ctxt, instance, block_migration, disk,
934934
block_migration=block_migration,
935935
disk=disk, migrate_data=migrate_data)
936936

937-
def supports_resize_with_qos_port(self, ctxt):
938-
"""Returns whether we can send 5.2, needed for migrating and resizing
939-
servers with ports having resource request.
940-
"""
941-
client = self.router.client(ctxt)
942-
return client.can_send_version('5.2')
943-
944937
# TODO(mriedem): Drop compat for request_spec being a legacy dict in v6.0.
945938
def prep_resize(self, ctxt, instance, image, instance_type, host,
946939
migration, request_spec, filter_properties, node,

nova/conductor/tasks/migrate.py

Lines changed: 2 additions & 113 deletions
Original file line numberDiff line numberDiff line change
@@ -227,106 +227,6 @@ def _is_selected_host_in_source_cell(self, selection):
227227
instance=self.instance)
228228
return same_cell
229229

230-
def _support_resource_request(self, selection):
231-
"""Returns true if the host is new enough to support resource request
232-
during migration and that the RPC API version is not pinned during
233-
rolling upgrade.
234-
"""
235-
svc = objects.Service.get_by_host_and_binary(
236-
self.context, selection.service_host, 'nova-compute')
237-
return (svc.version >= 39 and
238-
self.compute_rpcapi.supports_resize_with_qos_port(
239-
self.context))
240-
241-
# TODO(gibi): Remove this compat code when nova doesn't need to support
242-
# Train computes any more.
243-
def _get_host_supporting_request(self, selection_list):
244-
"""Return the first compute selection from the selection_list where
245-
the service is new enough to support resource request during migration
246-
and the resources claimed successfully.
247-
248-
:param selection_list: a list of Selection objects returned by the
249-
scheduler
250-
:return: A two tuple. The first item is a Selection object
251-
representing the host that supports the request. The second item
252-
is a list of Selection objects representing the remaining alternate
253-
hosts.
254-
:raises MaxRetriesExceeded: if none of the hosts in the selection_list
255-
is new enough to support the request or we cannot claim resource
256-
on any of the hosts that are new enough.
257-
"""
258-
259-
if not self.request_spec.requested_resources:
260-
return selection_list[0], selection_list[1:]
261-
262-
# Scheduler allocated resources on the first host. So check if the
263-
# first host is new enough
264-
if self._support_resource_request(selection_list[0]):
265-
return selection_list[0], selection_list[1:]
266-
267-
# First host is old, so we need to use an alternate. Therefore we have
268-
# to remove the allocation from the first host.
269-
self.reportclient.delete_allocation_for_instance(
270-
self.context, self.instance.uuid)
271-
LOG.debug(
272-
'Scheduler returned host %(host)s as a possible migration target '
273-
'but that host is not new enough to support the migration with '
274-
'resource request %(request)s or the compute RPC is pinned to '
275-
'less than 5.2. Trying alternate hosts.',
276-
{'host': selection_list[0].service_host,
277-
'request': self.request_spec.requested_resources},
278-
instance=self.instance)
279-
280-
alternates = selection_list[1:]
281-
282-
for i, selection in enumerate(alternates):
283-
if self._support_resource_request(selection):
284-
# this host is new enough so we need to try to claim resources
285-
# on it
286-
if selection.allocation_request:
287-
alloc_req = jsonutils.loads(
288-
selection.allocation_request)
289-
resource_claimed = scheduler_utils.claim_resources(
290-
self.context, self.reportclient, self.request_spec,
291-
self.instance.uuid, alloc_req,
292-
selection.allocation_request_version)
293-
294-
if not resource_claimed:
295-
LOG.debug(
296-
'Scheduler returned alternate host %(host)s as a '
297-
'possible migration target but resource claim '
298-
'failed on that host. Trying another alternate.',
299-
{'host': selection.service_host},
300-
instance=self.instance)
301-
else:
302-
return selection, alternates[i + 1:]
303-
304-
else:
305-
# Some deployments use different schedulers that do not
306-
# use Placement, so they will not have an
307-
# allocation_request to claim with. For those cases,
308-
# there is no concept of claiming, so just assume that
309-
# the resources are available.
310-
return selection, alternates[i + 1:]
311-
312-
else:
313-
LOG.debug(
314-
'Scheduler returned alternate host %(host)s as a possible '
315-
'migration target but that host is not new enough to '
316-
'support the migration with resource request %(request)s '
317-
'or the compute RPC is pinned to less than 5.2. '
318-
'Trying another alternate.',
319-
{'host': selection.service_host,
320-
'request': self.request_spec.requested_resources},
321-
instance=self.instance)
322-
323-
# if we reach this point then none of the hosts was new enough for the
324-
# request or we failed to claim resources on every alternate
325-
reason = ("Exhausted all hosts available during compute service level "
326-
"check for instance %(instance_uuid)s." %
327-
{"instance_uuid": self.instance.uuid})
328-
raise exception.MaxRetriesExceeded(reason=reason)
329-
330230
def _execute(self):
331231
# NOTE(sbauza): Force_hosts/nodes needs to be reset if we want to make
332232
# sure that the next destination is not forced to be the original host.
@@ -436,8 +336,8 @@ def _schedule(self):
436336
# just need the first returned element.
437337
selection_list = selection_lists[0]
438338

439-
selection, self.host_list = self._get_host_supporting_request(
440-
selection_list)
339+
# Scheduler allocated resources on the first host so try that first
340+
selection, self.host_list = selection_list[0], selection_list[1:]
441341

442342
scheduler_utils.fill_provider_mapping(self.request_spec, selection)
443343
return selection
@@ -452,17 +352,6 @@ def _reschedule(self):
452352
selection = None
453353
while self.host_list and not host_available:
454354
selection = self.host_list.pop(0)
455-
if (self.request_spec.requested_resources and not
456-
self._support_resource_request(selection)):
457-
LOG.debug(
458-
'Scheduler returned alternate host %(host)s as a possible '
459-
'migration target for re-schedule but that host is not '
460-
'new enough to support the migration with resource '
461-
'request %(request)s. Trying another alternate.',
462-
{'host': selection.service_host,
463-
'request': self.request_spec.requested_resources},
464-
instance=self.instance)
465-
continue
466355
if selection.allocation_request:
467356
alloc_req = jsonutils.loads(selection.allocation_request)
468357
else:

0 commit comments

Comments
 (0)