Skip to content

Commit b4a52f8

Browse files
author
Balazs Gibizer
committed
Add remove_resources_from_instance_allocation to report client
The subsequent patch I0fb35036e77e9abe141fe8831b4c23d02e567b96 will need to modify the existing instance allocation by removing a set of resources. This patch adds the necessary report client code to be able to do that. Change-Id: I66d69327d3361825ca0b44b46744b97ea3069eb1 blueprint: bandwidth-resource-provider
1 parent 23444c2 commit b4a52f8

File tree

2 files changed

+546
-1
lines changed

2 files changed

+546
-1
lines changed

nova/scheduler/client/report.py

Lines changed: 127 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1624,6 +1624,133 @@ def claim_resources(self, context, consumer_uuid, alloc_request,
16241624
raise Retry('claim_resources', reason)
16251625
return r.status_code == 204
16261626

def remove_resources_from_instance_allocation(
        self, context, consumer_uuid, resources):
    """Removes certain resources from the current allocation of the
    consumer.

    :param context: the request context
    :param consumer_uuid: the uuid of the consumer to update
    :param resources: a dict of resources. E.g.:
        {
            <rp_uuid>: {
                <resource class>: amount
                <other resource class>: amount
            }
            <other_ rp_uuid>: {
                <other resource class>: amount
            }
        }
    :raises AllocationUpdateFailed: if the requested resource cannot be
        removed from the current allocation (e.g. rp is missing from
        the allocation) or there were multiple successive generation
        conflicts and we ran out of retries.
    :raises ConsumerAllocationRetrievalFailed: If the current allocation
        cannot be read from placement.
    :raises: keystoneauth1.exceptions.base.ClientException on failure to
        communicate with the placement API
    """

    # NOTE(gibi): It is just a small wrapper to raise instead of return
    # if we run out of retries.
    if not self._remove_resources_from_instance_allocation(
            context, consumer_uuid, resources):
        error_reason = _("Cannot remove resources %s from the allocation "
                         "due to multiple successive generation conflicts "
                         "in placement.")
        raise exception.AllocationUpdateFailed(
            consumer_uuid=consumer_uuid,
            error=error_reason % resources)
@retries
def _remove_resources_from_instance_allocation(
        self, context, consumer_uuid, resources):
    """Subtract the given resources from the consumer's current
    allocation in placement and PUT the result back.

    Resource classes whose amount drops to zero are removed from the
    allocation, and resource providers left with no resources are
    dropped entirely.

    :param context: the request context
    :param consumer_uuid: the uuid of the consumer whose allocation is
        updated
    :param resources: dict, keyed by resource provider uuid, of dicts
        mapping resource class name to the amount to remove
    :returns: True on success (including the no-op case of empty
        ``resources``); False is produced by the ``@retries`` decorator
        when the retry budget is exhausted
    :raises AllocationUpdateFailed: if the allocation is empty, a
        provider or resource class in ``resources`` is missing from the
        allocation, or removal would drive an amount negative
    :raises Retry: internally, to make ``@retries`` re-run this function
        on a placement generation conflict
    """
    if not resources:
        # Nothing to remove so do not query or update allocation in
        # placement.
        # The True value is only here because the retry decorator returns
        # False when runs out of retries. It would be nicer to raise in
        # that case too.
        return True

    # Fetch the consumer's current allocations (includes the consumer
    # generation needed for the conflict-checked PUT below).
    current_allocs = self.get_allocs_for_consumer(context, consumer_uuid)

    if not current_allocs['allocations']:
        error_reason = _("Cannot remove resources %(resources)s from "
                         "allocation %(allocations)s. The allocation is "
                         "empty.")
        raise exception.AllocationUpdateFailed(
            consumer_uuid=consumer_uuid,
            error=error_reason %
            {'resources': resources, 'allocations': current_allocs})

    try:
        # Subtract the requested amounts in place on the fetched
        # allocation structure.
        for rp_uuid, resources_to_remove in resources.items():
            allocation_on_rp = current_allocs['allocations'][rp_uuid]
            for rc, value in resources_to_remove.items():
                allocation_on_rp['resources'][rc] -= value

                if allocation_on_rp['resources'][rc] < 0:
                    error_reason = _(
                        "Cannot remove resources %(resources)s from "
                        "allocation %(allocations)s. There are not enough "
                        "allocated resources left on %(rp_uuid)s resource "
                        "provider to remove %(amount)d amount of "
                        "%(resource_class)s resources.")
                    raise exception.AllocationUpdateFailed(
                        consumer_uuid=consumer_uuid,
                        error=error_reason %
                        {'resources': resources,
                         'allocations': current_allocs,
                         'rp_uuid': rp_uuid,
                         'amount': value,
                         'resource_class': rc})

                if allocation_on_rp['resources'][rc] == 0:
                    # if no allocation left for this rc then remove it
                    # from the allocation
                    del allocation_on_rp['resources'][rc]
    except KeyError as e:
        error_reason = _("Cannot remove resources %(resources)s from "
                         "allocation %(allocations)s. Key %(missing_key)s "
                         "is missing from the allocation.")
        # rp_uuid is missing from the allocation or resource class is
        # missing from the allocation
        raise exception.AllocationUpdateFailed(
            consumer_uuid=consumer_uuid,
            error=error_reason %
            {'resources': resources,
             'allocations': current_allocs,
             'missing_key': e})

    # we have to remove the rps from the allocation that has no resources
    # any more
    current_allocs['allocations'] = {
        rp_uuid: alloc
        for rp_uuid, alloc in current_allocs['allocations'].items()
        if alloc['resources']}

    # PUT the reduced allocation back; placement rejects it with a
    # generation conflict if anything changed concurrently.
    r = self._put_allocations(
        context, consumer_uuid, current_allocs)

    if r.status_code != 204:
        err = r.json()['errors'][0]
        if err['code'] == 'placement.concurrent_update':
            reason = ('another process changed the resource providers or '
                      'the consumer involved in our attempt to update '
                      'allocations for consumer %s so we cannot remove '
                      'resources %s from the current allocation %s' %
                      (consumer_uuid, resources, current_allocs))
            # NOTE(gibi): automatic retry is meaningful if we can still
            # remove the resources from the updated allocations. Retry
            # works here as this function (re)queries the allocations.
            raise Retry(
                'remove_resources_from_instance_allocation', reason)

    # It is only here because the retry decorator returns False when runs
    # out of retries. It would be nicer to raise in that case too.
    return True
16271754
def remove_provider_tree_from_instance_allocation(self, context,
16281755
consumer_uuid,
16291756
root_rp_uuid):

0 commit comments

Comments
 (0)