diff --git a/.gitreview b/.gitreview index c1443cb9c5a..4a6ca9f8a66 100644 --- a/.gitreview +++ b/.gitreview @@ -2,4 +2,4 @@ host=review.opendev.org port=29418 project=openstack/cinder.git -defaultbranch=stable/2023.1 +defaultbranch=unmaintained/2023.1 diff --git a/.zuul.yaml b/.zuul.yaml index 33e11c60a5a..fa808ef1a48 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -58,6 +58,9 @@ irrelevant-files: *gate-irrelevant-files - cinder-tempest-plugin-lvm-lio-barbican: irrelevant-files: *gate-irrelevant-files + - cinder-tempest-plugin-lvm-lio-barbican-fips: + voting: false + irrelevant-files: *gate-irrelevant-files - cinder-grenade-mn-sub-volbak: irrelevant-files: *gate-irrelevant-files - cinder-tempest-lvm-multibackend: @@ -68,6 +71,9 @@ irrelevant-files: *gate-irrelevant-files - devstack-plugin-nfs-tempest-full: irrelevant-files: *gate-irrelevant-files + - devstack-plugin-nfs-tempest-full-fips: + voting: false + irrelevant-files: *gate-irrelevant-files - tempest-slow-py3: irrelevant-files: *gate-irrelevant-files - tempest-integrated-storage: @@ -174,6 +180,17 @@ volume_revert: True volume_image_dep_tests: False +- job: + # this depends on some ceph admin setup which is not yet complete + # TODO(alee) enable this test when ceph admin work is complete. + name: cinder-plugin-ceph-tempest-fips + parent: cinder-plugin-ceph-tempest + nodeset: devstack-single-node-centos-9-stream + pre-run: playbooks/enable-fips.yaml + vars: + configure_swap_size: 4096 + nslookup_target: 'opendev.org' + - job: name: cinder-plugin-ceph-tempest-mn-aa parent: devstack-plugin-ceph-multinode-tempest-py3 diff --git a/bindep.txt b/bindep.txt index d32d02680e4..6311a188539 100644 --- a/bindep.txt +++ b/bindep.txt @@ -29,6 +29,7 @@ postgresql postgresql-client [platform:dpkg] postgresql-devel [platform:rpm] postgresql-server [platform:rpm] +python3-devel [platform:rpm test] libpq-dev [platform:dpkg] thin-provisioning-tools [platform:debian] libxml2-dev [platform:dpkg test] diff --git a/cinder/api/schemas/volume_image_metadata.py b/cinder/api/schemas/volume_image_metadata.py index 7d810f6693d..7b56caf89f1 100644 --- a/cinder/api/schemas/volume_image_metadata.py +++ b/cinder/api/schemas/volume_image_metadata.py @@ -27,7 +27,7 @@ 'os-set_image_metadata': { 'type': 'object', 'properties': { - 'metadata': parameter_types.extra_specs, + 'metadata': parameter_types.image_metadata, }, 'required': ['metadata'], 'additionalProperties': False, diff --git a/cinder/api/v3/router.py b/cinder/api/v3/router.py index e31c9b1f1a0..6c98b1a387f 100644 --- a/cinder/api/v3/router.py +++ b/cinder/api/v3/router.py @@ -210,22 +210,28 @@ def _setup_routes(self, mapper, ext_mgr): member={'accept': 'POST'}) self.resources['default_types'] = default_types.create_resource() - mapper.connect("default-types", "/default-types/{id}", - controller=self.resources['default_types'], - action='create_update', - conditions={"method": ['PUT']}) - - mapper.connect("default-types", "/default-types", - controller=self.resources['default_types'], - action='index', - conditions={"method": ['GET']}) - - mapper.connect("default-types", "/default-types/{id}", - controller=self.resources['default_types'], - action='detail', - conditions={"method": ['GET']}) - - mapper.connect("default-types", "/default-types/{id}", - controller=self.resources['default_types'], - action='delete', - conditions={"method": ['DELETE']}) + for path_prefix in ['/{project_id}', '']: + # project_id is optional + mapper.connect( + "default-types", "%s/default-types/{id}" % path_prefix, + 
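+                # (annotation, not in the original patch: each of the four
+                # default-types routes is registered twice by this loop,
+                # once with and once without the project_id prefix)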
controller=self.resources['default_types'], + action='create_update', + conditions={"method": ['PUT']}) + + mapper.connect( + "default-types", "%s/default-types" % path_prefix, + controller=self.resources['default_types'], + action='index', + conditions={"method": ['GET']}) + + mapper.connect( + "default-types", "%s/default-types/{id}" % path_prefix, + controller=self.resources['default_types'], + action='detail', + conditions={"method": ['GET']}) + + mapper.connect( + "default-types", "%s/default-types/{id}" % path_prefix, + controller=self.resources['default_types'], + action='delete', + conditions={"method": ['DELETE']}) diff --git a/cinder/api/validation/parameter_types.py b/cinder/api/validation/parameter_types.py index 2e098c83ab2..cb8203d548f 100644 --- a/cinder/api/validation/parameter_types.py +++ b/cinder/api/validation/parameter_types.py @@ -157,6 +157,15 @@ def valid_char(char): 'additionalProperties': False } +image_metadata = { + 'type': 'object', + 'patternProperties': { + '^[a-zA-Z0-9-_:. /]{1,255}$': { + 'type': 'string', 'format': 'mysql_text' + } + }, + 'additionalProperties': False +} extra_specs_with_no_spaces_key = { 'type': 'object', diff --git a/cinder/api/validation/validators.py b/cinder/api/validation/validators.py index 7369c1ff583..2d714e6ff0d 100644 --- a/cinder/api/validation/validators.py +++ b/cinder/api/validation/validators.py @@ -398,6 +398,14 @@ def _validate_key_size(param_value): return True +@jsonschema.FormatChecker.cls_checks('mysql_text') +def _validate_mysql_text(param_value): + length = len(param_value.encode('utf8')) + if length > 65535: + return False + return True + + class FormatChecker(jsonschema.FormatChecker): """A FormatChecker can output the message from cause exception diff --git a/cinder/backup/drivers/ceph.py b/cinder/backup/drivers/ceph.py index 12552559820..71eb1b7b1ab 100644 --- a/cinder/backup/drivers/ceph.py +++ b/cinder/backup/drivers/ceph.py @@ -47,6 +47,7 @@ import os import re import subprocess +import tempfile import time from typing import Dict, List, Optional, Tuple # noqa: H301 @@ -618,33 +619,40 @@ def _piped_execute(self, cmd1: list, cmd2: list) -> Tuple[int, bytes]: LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1)) LOG.debug("cmd2='%s'", ' '.join(cmd2)) - try: - p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=True) - except OSError as e: - LOG.error("Pipe1 failed - %s ", e) - raise + with tempfile.TemporaryFile() as errfile: - # NOTE(dosaboy): ensure that the pipe is blocking. This is to work - # around the case where evenlet.green.subprocess is used which seems to - # use a non-blocking pipe. - assert p1.stdout is not None - flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK) - fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags) - - try: - p2 = subprocess.Popen(cmd2, stdin=p1.stdout, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=True) - except OSError as e: - LOG.error("Pipe2 failed - %s ", e) - raise + try: + p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, + stderr=errfile, + close_fds=True) + except OSError as e: + LOG.error("Pipe1 failed - %s ", e) + raise + + # NOTE(dosaboy): ensure that the pipe is blocking. This is to work + # around the case where evenlet.green.subprocess is used which + # seems to use a non-blocking pipe. 
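+            # (annotation: clearing os.O_NONBLOCK on p1's stdout restores
+            # blocking reads, so cmd2 consumes the whole stream instead of
+            # failing early with EAGAIN)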
+            assert p1.stdout is not None
+            flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK)
+            fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags)

-        p1.stdout.close()
-        stdout, stderr = p2.communicate()
-        return p2.returncode, stderr
+            try:
+                p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
+                                      stdout=subprocess.PIPE,
+                                      stderr=errfile,
+                                      close_fds=True)
+            except OSError as e:
+                LOG.error("Pipe2 failed - %s ", e)
+                raise
+
+            p1.stdout.close()
+            p2.communicate()
+            p1.wait()
+
+            errfile.seek(0)
+            px_stderr = errfile.read()
+
+            return p1.returncode or p2.returncode, px_stderr

     def _rbd_diff_transfer(self, src_name: str, src_pool: str,
                            dest_name: str, dest_pool: str,
diff --git a/cinder/backup/drivers/swift.py b/cinder/backup/drivers/swift.py
index 431f3e20076..4f0da356dd2 100644
--- a/cinder/backup/drivers/swift.py
+++ b/cinder/backup/drivers/swift.py
@@ -192,7 +192,8 @@ def _headers(self, headers=None):
         sa_plugin = service_auth.get_service_auth_plugin()
         if sa_plugin is not None:
-            result['X-Service-Token'] = sa_plugin.get_token()
+            sa_session = service_auth.get_service_session()
+            result['X-Service-Token'] = sa_plugin.get_token(session=sa_session)
         return result
diff --git a/cinder/cmd/manage.py b/cinder/cmd/manage.py
index 5783a01700f..ca2f6de20f0 100644
--- a/cinder/cmd/manage.py
+++ b/cinder/cmd/manage.py
@@ -645,6 +645,16 @@ def update_host(self, currenthost: str, newhost: str) -> None:
             db.volume_update(ctxt, v['id'], {'host': newhost})

+    def update_service(self):
+        """Modify the service uuid associated with a volume.
+
+        In certain upgrade cases, we create new cinder services and delete
+        the records of the old ones; however, volumes created under the old
+        services still contain the service uuid of those old services.
+        """
+        ctxt = context.get_admin_context()
+        db.volume_update_all_by_service(ctxt)
+

 class ConfigCommands(object):
     """Class for exposing the flags defined by flag_file(s)."""
diff --git a/cinder/db/api.py b/cinder/db/api.py
index 03b0ab33bcc..0d9b5ff22aa 100644
--- a/cinder/db/api.py
+++ b/cinder/db/api.py
@@ -443,6 +443,11 @@ def volume_get_all_by_host(context, host, filters=None):
     return IMPL.volume_get_all_by_host(context, host, filters=filters)


+def volume_update_all_by_service(context):
+    """Update all volumes associated with an old service."""
+    return IMPL.volume_update_all_by_service(context)
+
+
 def volume_get_all_by_group(context, group_id, filters=None):
     """Get all volumes belonging to a consistency group."""
     return IMPL.volume_get_all_by_group(context, group_id, filters=filters)
diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index c59dea44c34..840dc353ccb 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -2862,6 +2862,32 @@ def volume_get_all_by_group(context, group_id, filters=None):
     return query.all()


+@require_admin_context
+@main_context_manager.writer
+def volume_update_all_by_service(context):
+    """Ensure volumes have the correct service_uuid value for their host.
+
+    In some deployment tools an upgrade recreates all service records, so
+    the c-vol service gets a new record in the services table even though
+    its host name is constant; the old service record is then deleted.
+    As a consequence, the volumes keep the right host name, but their
+    service UUID must be updated to the ID of the new service record.
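+    Volumes are matched to their service by host name; the match also
+    covers volume hosts that carry a backend or pool suffix
+    (host@backend#pool).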
+ + :param context: context to query under + """ + # Get all cinder-volume services + services = service_get_all(context, binary='cinder-volume') + for service in services: + query = model_query(context, models.Volume) + query = query.filter( + _filter_host( + models.Volume.host, service.host), + models.Volume.service_uuid != service.uuid) + query.update( + {"service_uuid": service.uuid}, synchronize_session=False) + + @require_context @main_context_manager.reader def volume_get_all_by_generic_group(context, group_id, filters=None): @@ -8678,9 +8704,8 @@ def use_quota_online_data_migration( calculate_use_quota, ): updated = 0 - query = model_query(context, getattr(models, resource_name)).filter_by( - use_quota=None - ) + query = model_query(context, getattr(models, resource_name), + read_deleted='yes').filter_by(use_quota=None) if resource_name == 'Volume': query = query.options(joinedload(models.Volume.volume_admin_metadata)) total = query.count() diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py index 92d0ddd875b..11a899c8e20 100644 --- a/cinder/image/image_utils.py +++ b/cinder/image/image_utils.py @@ -104,9 +104,9 @@ 'an operator has configured glance property protections ' 'to make some image properties read-only. Cinder will ' '*always* filter out image metadata in the namespaces ' - '`os_glance` and `img_signature`; this configuration ' - 'option allows operators to specify *additional* ' - 'namespaces to be excluded.', + '`os_glance`, `img_signature` and `signature_verified`; ' + 'this configuration option allows operators to specify ' + '*additional* namespaces to be excluded.', default=[]), ] @@ -130,7 +130,8 @@ COMPRESSIBLE_IMAGE_FORMATS = ('qcow2',) -GLANCE_RESERVED_NAMESPACES = ["os_glance", "img_signature"] +GLANCE_RESERVED_NAMESPACES = ["os_glance", "img_signature", + "signature_verified"] def validate_stores_id(context: context.RequestContext, diff --git a/cinder/service.py b/cinder/service.py index a45d48f679b..1b335cd70a1 100644 --- a/cinder/service.py +++ b/cinder/service.py @@ -42,6 +42,7 @@ from cinder.common import constants from cinder import context from cinder import coordination +from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects @@ -366,6 +367,9 @@ def _create_service_ref(self, # If we have updated the service_ref with replication data from # the cluster it will be saved. 
service_ref.save() + # Update all volumes that are associated with an old service with + # the new service uuid + db.volume_update_all_by_service(context) def __getattr__(self, key: str): manager = self.__dict__.get('manager', None) diff --git a/cinder/service_auth.py b/cinder/service_auth.py index 1e092454e06..fd854f85172 100644 --- a/cinder/service_auth.py +++ b/cinder/service_auth.py @@ -19,6 +19,7 @@ CONF = cfg.CONF _SERVICE_AUTH = None +_SERVICE_SESSION = None SERVICE_USER_GROUP = 'service_user' @@ -66,6 +67,16 @@ def get_service_auth_plugin(): return None +def get_service_session(): + if CONF.service_user.send_service_user_token: + global _SERVICE_SESSION + if not _SERVICE_SESSION: + _SERVICE_SESSION = ks_loading.load_session_from_conf_options( + CONF, SERVICE_USER_GROUP, auth=get_service_auth_plugin()) + return _SERVICE_SESSION + return None + + def get_auth_plugin(context, auth=None): if auth: user_auth = auth diff --git a/cinder/tests/unit/api/contrib/test_volume_image_metadata.py b/cinder/tests/unit/api/contrib/test_volume_image_metadata.py index 16b77d75efd..7ca96fc2379 100644 --- a/cinder/tests/unit/api/contrib/test_volume_image_metadata.py +++ b/cinder/tests/unit/api/contrib/test_volume_image_metadata.py @@ -220,6 +220,44 @@ def test_create_image_metadata(self, fake_get): self.assertEqual(fake_image_metadata, jsonutils.loads(res.body)["metadata"]) + # Test for value > 255 + body = {"os-set_image_metadata": { + "metadata": {"key": "v" * 260}} + } + req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( + fake.PROJECT_ID, fake.VOLUME_ID)) + req.method = "POST" + req.body = jsonutils.dump_as_bytes(body) + req.headers["content-type"] = "application/json" + fake_get.return_value = {} + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_ctxt)) + self.assertEqual(HTTPStatus.OK, res.status_int) + + # This is a weird one ... take a supplementary unicode + # char that requires 4 bytes, which will give us a short + # string in terms of character count, but a long string + # in terms of bytes, and make this string be exactly + # 65535 bytes in length. This should be OK. 
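+        # (annotation: the byte math is 16383 chars * 4 bytes + len('123')
+        # = 65532 + 3 = 65535 bytes, while len() counts only 16386
+        # characters; the mysql_text format checker limits
+        # len(value.encode('utf8')), not len(value))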
+        char4bytes = "\N{CJK UNIFIED IDEOGRAPH-29D98}"
+        self.assertEqual(1, len(char4bytes))
+        self.assertEqual(4, len(char4bytes.encode('utf-8')))
+        str65535bytes = char4bytes * 16383 + '123'
+        self.assertLess(len(str65535bytes), 65535)
+        self.assertEqual(65535, len(str65535bytes.encode('utf-8')))
+        body = {"os-set_image_metadata": {
+            "metadata": {"key": str65535bytes}}
+        }
+        req = webob.Request.blank('/v3/%s/volumes/%s/action' % (
+            fake.PROJECT_ID, fake.VOLUME_ID))
+        req.method = "POST"
+        req.body = jsonutils.dump_as_bytes(body)
+        req.headers["content-type"] = "application/json"
+        fake_get.return_value = {}
+        res = req.get_response(fakes.wsgi_app(
+            fake_auth_context=self.user_ctxt))
+        self.assertEqual(HTTPStatus.OK, res.status_int)
+
     @mock.patch('cinder.objects.Volume.get_by_id')
     def test_create_image_metadata_policy_not_authorized(self, fake_get):
         rules = {
@@ -323,15 +361,38 @@ def test_invalid_metadata_items_on_create(self, fake_get):
                           self.controller.create, req, fake.VOLUME_ID,
                           body=data)

-        # Test for long value
+        # Test for very long value
         data = {"os-set_image_metadata": {
-            "metadata": {"key": "v" * 260}}
+            "metadata": {"key": "v" * 65550}}
+        }
+        req.body = jsonutils.dump_as_bytes(data)
+        self.assertRaises(exception.ValidationError,
+                          self.controller.create, req, fake.VOLUME_ID,
+                          body=data)
+
+        # Test for very long utf8 value
+        data = {"os-set_image_metadata": {
+            "metadata": {"key": "á" * 32775}}
         }
         req.body = jsonutils.dump_as_bytes(data)
         self.assertRaises(exception.ValidationError,
                           self.controller.create, req, fake.VOLUME_ID,
                           body=data)

+        # Test a short unicode string that actually exceeds
+        # the allowed byte count
+        char4bytes = "\N{CJK UNIFIED IDEOGRAPH-29D98}"
+        str65536bytes = char4bytes * 16384
+        self.assertEqual(65536, len(str65536bytes.encode('utf-8')))
+        self.assertLess(len(str65536bytes), 65535)
+        body = {"os-set_image_metadata": {
+            "metadata": {"key": str65536bytes}}
+        }
+        req.body = jsonutils.dump_as_bytes(body)
+        self.assertRaises(exception.ValidationError,
+                          self.controller.create, req, fake.VOLUME_ID,
+                          body=body)
+
         # Test for empty key.
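+        # (annotation: the image_metadata schema requires keys of 1-255
+        # characters and sets additionalProperties to False, so an empty
+        # key is rejected)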
data = {"os-set_image_metadata": { "metadata": {"": "value1"}} diff --git a/cinder/tests/unit/backup/drivers/test_backup_ceph.py b/cinder/tests/unit/backup/drivers/test_backup_ceph.py index b5bef257aa4..8d8cc9f0b2a 100644 --- a/cinder/tests/unit/backup/drivers/test_backup_ceph.py +++ b/cinder/tests/unit/backup/drivers/test_backup_ceph.py @@ -157,6 +157,10 @@ def communicate(mock_inst): self.callstack.append('communicate') return retval + def wait(mock_inst): + self.callstack.append('wait') + return retval + subprocess.Popen.side_effect = MockPopen def setUp(self): @@ -522,7 +526,8 @@ def mock_read_data(): 'popen_init', 'write', 'stdout_close', - 'communicate'], self.callstack) + 'communicate', + 'wait'], self.callstack) self.assertFalse(mock_full_backup.called) self.assertFalse(mock_get_backup_snaps.called) @@ -1412,8 +1417,8 @@ def test_piped_execute(self, mock_fcntl): mock_fcntl.return_value = 0 self._setup_mock_popen(['out', 'err']) self.service._piped_execute(['foo'], ['bar']) - self.assertEqual(['popen_init', 'popen_init', - 'stdout_close', 'communicate'], self.callstack) + self.assertEqual(['popen_init', 'popen_init', 'stdout_close', + 'communicate', 'wait'], self.callstack) @common_mocks def test_restore_metdata(self): diff --git a/cinder/tests/unit/backup/drivers/test_backup_swift.py b/cinder/tests/unit/backup/drivers/test_backup_swift.py index 750e7ba1385..04c72e241cb 100644 --- a/cinder/tests/unit/backup/drivers/test_backup_swift.py +++ b/cinder/tests/unit/backup/drivers/test_backup_swift.py @@ -317,7 +317,7 @@ def test_backup_swift_service_auth_headers_partial_enabled(self): @mock.patch.object(service_auth, 'get_service_auth_plugin') def test_backup_swift_service_auth_headers_enabled(self, mock_plugin): class FakeServiceAuthPlugin: - def get_token(self): + def get_token(self, session): return "fake" self.override_config('send_service_user_token', True, group='service_user') diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py index caccd2ffe9f..50878f1f55c 100644 --- a/cinder/tests/unit/test_db_api.py +++ b/cinder/tests/unit/test_db_api.py @@ -910,6 +910,42 @@ def test_volume_get_all_by_group_with_filters(self): filters={'foo': 'bar'}) self.assertEqual([], vols) + def test_volume_update_all_by_service(self): + volume_service_uuid = '918f24b6-c4c9-48e6-86c6-6871e91f4779' + alt_vol_service_uuid = '4b3356a0-31e1-4cec-af1c-07e1e0d7dcf0' + service_uuid_1 = 'c7b169f8-8da6-4330-b462-0467069371e2' + service_uuid_2 = '38d41b71-2f4e-4d3e-8206-d51ace608bca' + host = 'fake_host' + alt_host = 'alt_fake_host' + binary = 'cinder-volume' + # Create 3 volumes with host 'fake_host' + for i in range(3): + db.volume_create(self.ctxt, { + 'service_uuid': volume_service_uuid, + 'host': host, + 'volume_type_id': fake.VOLUME_TYPE_ID}) + # Create 2 volumes with host 'alt_fake_host' + for i in range(2): + db.volume_create(self.ctxt, { + 'service_uuid': alt_vol_service_uuid, + 'host': alt_host, + 'volume_type_id': fake.VOLUME_TYPE_ID}) + # Create service entry for 'fake_host' + utils.create_service( + self.ctxt, + {'uuid': service_uuid_1, 'host': host, 'binary': binary}) + # Create service entry for 'alt_fake_host' + utils.create_service( + self.ctxt, + {'uuid': service_uuid_2, 'host': alt_host, 'binary': binary}) + db.volume_update_all_by_service(self.ctxt) + volumes = db.volume_get_all(self.ctxt) + for volume in volumes: + if volume.host == host: + self.assertEqual(service_uuid_1, volume.service_uuid) + elif volume.host == alt_host: + self.assertEqual(service_uuid_2, 
volume.service_uuid) + def test_volume_get_all_by_project(self): volumes = [] for i in range(3): @@ -3982,7 +4018,8 @@ def test_use_quota_online_data_migration(self, query_mock, models_mock): calculate_method) query_mock.assert_called_once_with(self.ctxt, - models_mock.resource_name) + models_mock.resource_name, + read_deleted='yes') query_mock.return_value.filter_by.assert_called_once_with( use_quota=None) query.count.assert_called_once_with() diff --git a/cinder/tests/unit/test_image_utils.py b/cinder/tests/unit/test_image_utils.py index 2918cf852a9..d0a193e31b1 100644 --- a/cinder/tests/unit/test_image_utils.py +++ b/cinder/tests/unit/test_image_utils.py @@ -2734,7 +2734,7 @@ def test_filter_out_reserved_namespaces_metadata_with_empty_metadata(self): @ddt.unpack def test_filter_out_reserved_namespaces_metadata( self, metadata_for_test, config, keys_to_pop): - hardcoded_keys = ['os_glance', "img_signature"] + hardcoded_keys = image_utils.GLANCE_RESERVED_NAMESPACES keys_to_pop = hardcoded_keys + keys_to_pop @@ -2794,7 +2794,7 @@ def test_filter_out_reserved_namespaces_metadata( @ddt.unpack def test_filter_out_reserved_namespaces_metadata_properties( self, metadata_for_test, config, keys_to_pop): - hardcoded_keys = ['os_glance', "img_signature"] + hardcoded_keys = image_utils.GLANCE_RESERVED_NAMESPACES keys_to_pop = hardcoded_keys + keys_to_pop diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py index 3b3249f8a05..b4e3e135d81 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022, 2023, Hitachi, Ltd. +# Copyright (C) 2022, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -141,19 +141,20 @@ def _volume_get(context, volume_id): TEST_SNAPSHOT = [] -snapshot = {} -snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0) -snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0) -snapshot['provider_location'] = '{0:d}'.format(1) -snapshot['status'] = 'available' -snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) -snapshot['volume'] = _volume_get(None, snapshot['volume_id']) -snapshot['volume_name'] = 'test-volume{0:d}'.format(0) -snapshot['volume_size'] = 128 -snapshot = obj_snap.Snapshot._from_db_object( - CTXT, obj_snap.Snapshot(), - fake_snapshot.fake_db_snapshot(**snapshot)) -TEST_SNAPSHOT.append(snapshot) +for i in range(2): + snapshot = {} + snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(i) + snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(i) + snapshot['provider_location'] = '{0:d}'.format(i + 1) + snapshot['status'] = 'available' + snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) + snapshot['volume'] = _volume_get(None, snapshot['volume_id']) + snapshot['volume_name'] = 'test-volume{0:d}'.format(i) + snapshot['volume_size'] = 128 + snapshot = obj_snap.Snapshot._from_db_object( + CTXT, obj_snap.Snapshot(), + fake_snapshot.fake_db_snapshot(**snapshot)) + TEST_SNAPSHOT.append(snapshot) TEST_GROUP = [] for i in range(2): @@ -270,6 +271,29 @@ def _volume_get(context, volume_id): "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_SPLIT = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "00000000000000000000000000000004", +} + +GET_LDEV_RESULT_LABEL = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "00000000000000000000000000000001", } GET_LDEV_RESULT_MAPPED = { @@ -307,6 +331,7 @@ def _volume_get(context, volume_id): "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_REP = { @@ -315,6 +340,16 @@ def _volume_get(context, volume_id): "attributes": ["CVS", "HDP", "GAD"], "status": "NML", "numOfPorts": 1, + "label": "00000000000000000000000000000004", +} + +GET_LDEV_RESULT_REP_LABEL = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "GAD"], + "status": "NML", + "numOfPorts": 1, + "label": "00000000000000000000000000000001", } GET_POOL_RESULT = { @@ -373,6 +408,18 @@ def _volume_get(context, volume_id): ], } +GET_SNAPSHOTS_RESULT_TEST = { + "data": [ + { + "primaryOrSecondary": "S-VOL", + "status": "PSUS", + "pvolLdevId": 1, + "muNumber": 1, + "svolLdevId": 1, + }, + ], +} + GET_LUNS_RESULT = { "data": [ { @@ -773,10 +820,8 @@ def _request_side_effect( self.configuration.hitachi_mirror_pair_target_number), drv.common.rep_secondary._PAIR_TARGET_NAME) # stop the Loopingcall within the do_setup treatment - self.driver.common.rep_primary.client.keep_session_loop.stop() - self.driver.common.rep_primary.client.keep_session_loop.wait() - self.driver.common.rep_secondary.client.keep_session_loop.stop() - self.driver.common.rep_secondary.client.keep_session_loop.wait() + drv.common.rep_primary.client.keep_session_loop.stop() + 
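+        # the secondary REST client runs its own keep-session LoopingCall,
+        # so it is stopped separately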
drv.common.rep_secondary.client.keep_session_loop.stop() self._setup_config() @mock.patch.object(requests.Session, "request") @@ -878,19 +923,42 @@ def _request_side_effect( self.ldev_count = self.ldev_count + 1 return FakeResponse(200, GET_LDEV_RESULT_REP) else: - return FakeResponse(200, GET_LDEV_RESULT) + return FakeResponse(200, GET_LDEV_RESULT_SPLIT) else: if method in ('POST', 'PUT', 'DELETE'): return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/ldevs/' in url: - return FakeResponse(200, GET_LDEV_RESULT) + return FakeResponse(200, GET_LDEV_RESULT_SPLIT) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.delete_volume(TEST_VOLUME[4]) self.assertEqual(17, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_delete_volume_primary_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_LABEL) + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(1, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_primary_secondary_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_REP_LABEL) + self.driver.delete_volume(TEST_VOLUME[4]) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_secondary_is_invalid_ldev(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_REP_LABEL), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_volume(TEST_VOLUME[4]) + self.assertEqual(6, request.call_count) + @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), @@ -968,7 +1036,8 @@ def test_create_snapshot( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -978,7 +1047,7 @@ def test_create_snapshot( ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) actual = {'provider_location': '1'} self.assertEqual(actual, ret) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): @@ -999,6 +1068,23 @@ def test_delete_snapshot(self, request): self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(14, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_delete_snapshot_pldev_in_loc(self, request): + self.assertRaises(exception.VolumeDriverException, + self.driver.delete_snapshot, + TEST_SNAPSHOT[1]) + self.assertEqual(1, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_snapshot_snapshot_is_busy(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT_TEST)] + self.assertRaises(exception.SnapshotIsBusy, + self.driver.delete_snapshot, + 
TEST_SNAPSHOT[0]) + self.assertEqual(3, request.call_count) + @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -1232,6 +1318,14 @@ def test_unmanage(self, request): self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(3, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_unmanage_has_rep_pair_true(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_REP) + self.assertRaises(exception.VolumeDriverException, + self.driver.unmanage, + TEST_VOLUME[4]) + self.assertEqual(1, request.call_count) + @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' @@ -1250,15 +1344,25 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), - FakeResponse(200, COMPLETED_SUCCEEDED_RESULT)] - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(2, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) + + @mock.patch.object(requests.Session, "request") + def test_update_migrated_volume_replication(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[4], "available") + self.assertEqual(3, request.call_count) + actual = ({'_name_id': TEST_VOLUME[4]['id'], + 'provider_location': TEST_VOLUME[4]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1408,6 +1512,39 @@ def test_create_group_from_src_volume( 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type') + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + def test_create_group_from_src_Exception( + self, get_volume_type_extra_specs, get_volume_type, request): + extra_specs = {"test1": "aaa"} + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type.return_value = {} + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common.rep_primary._stats = {} + self.driver.common.rep_primary._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + self.driver.common.rep_secondary._stats = {} + self.driver.common.rep_secondary._stats['pools'] = [ + {'location_info': {'pool_id': 40}}] + self.assertRaises(exception.VolumeDriverException, + self.driver.create_group_from_src, + self.ctxt, 
TEST_GROUP[1], + [TEST_VOLUME[1], TEST_VOLUME[1]], + source_group=TEST_GROUP[0], + source_vols=[TEST_VOLUME[0], TEST_VOLUME[3]] + ) + self.assertEqual(10, request.call_count) + @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -1458,7 +1595,8 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -1468,7 +1606,7 @@ def test_create_group_snapshot_non_cg( ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py index f0f635c052f..3293c0007c9 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -219,6 +219,29 @@ def _volume_get(context, volume_id): "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_LABEL = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "00000000000000000000000000000001", +} + +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -241,6 +264,23 @@ def _volume_get(context, volume_id): "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "HTI"], + "status": "NML", + "label": "10000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_TEST = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "HTI", "111"], + "status": "NML", + "snapshotPoolId": 0 } GET_LDEV_RESULT_PAIR_STATUS_TEST = { @@ -638,8 +678,7 @@ def test_do_setup(self, brick_get_connector_properties, request): self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(4, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") 
@mock.patch.object( @@ -666,8 +705,7 @@ def test_do_setup_create_hg(self, brick_get_connector_properties, request): self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -695,8 +733,7 @@ def test_do_setup_create_hg_format( self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -748,8 +785,7 @@ def test_do_setup_create_hg_port_scheduler( self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(10, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -775,8 +811,7 @@ def test_do_setup_pool_name(self, brick_get_connector_properties, request): self.assertEqual(5, request.call_count) self.configuration.hitachi_pools = tmp_pools # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") def test_create_volume(self, request): @@ -834,7 +869,7 @@ def test_delete_volume(self, request): self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") - def test_delete_volume_temporary_busy(self, request): + def test_delete_volume_wait_copy_pair_deleting(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), FakeResponse(200, GET_LDEV_RESULT), @@ -849,7 +884,7 @@ def test_delete_volume_temporary_busy(self, request): @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=test_utils.ZeroIntervalLoopingCall) @mock.patch.object(requests.Session, "request") - def test_delete_volume_busy_timeout(self, request): + def test_delete_volume_request_failed(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), FakeResponse(200, GET_LDEV_RESULT_PAIR), @@ -860,6 +895,21 @@ def test_delete_volume_busy_timeout(self, request): TEST_VOLUME[0]) self.assertGreater(request.call_count, 2) + @mock.patch.object(requests.Session, "request") + def test_delete_volume_volume_is_busy(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR)] + self.assertRaises(exception.VolumeIsBusy, + self.driver.delete_volume, + TEST_VOLUME[0]) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_LABEL) + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(1, request.call_count) + @mock.patch.object(requests.Session, "request") def 
test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), @@ -906,6 +956,7 @@ def test_get_volume_stats( @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @mock.patch.object(hbsd_rest.HBSDREST, "get_pool_info") + @mock.patch.object(requests.Session, 'request', new=mock.MagicMock()) def test_get_volume_stats_error( self, get_pool_info, get_filter_function, get_goodness_function): get_pool_info.side_effect = exception.VolumeDriverException(data='') @@ -939,7 +990,8 @@ def test_create_snapshot( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ @@ -947,7 +999,7 @@ def test_create_snapshot( ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -957,7 +1009,8 @@ def test_create_snapshot_dedup_false( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {'hbsd:capacity_saving': 'disable'} self.driver.common._stats = {} @@ -966,11 +1019,11 @@ def test_create_snapshot_dedup_false( ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), @@ -990,7 +1043,7 @@ def test_delete_snapshot(self, request): @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -1243,6 +1296,31 @@ def test_unmanage(self, request): self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(2, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_unmanage_volume_is_busy(self, request): + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), + ] + self.assertRaises(exception.VolumeIsBusy, + self.driver.unmanage, + TEST_VOLUME[1]) + self.assertEqual(4, 
request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_unmanage_volume_is_busy_raise_ex(self, request): + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(400, GET_SNAPSHOTS_RESULT_BUSY) + ] + self.assertRaises(exception.VolumeDriverException, + self.driver.unmanage, + TEST_VOLUME[0]) + self.assertEqual(3, request.call_count) + @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' @@ -1260,14 +1338,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1471,7 +1547,8 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -1479,7 +1556,7 @@ def test_create_group_snapshot_non_cg( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1498,6 +1575,7 @@ def test_create_group_snapshot_cg( is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1509,7 +1587,7 @@ def test_create_group_snapshot_cg( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1520,7 +1598,7 @@ def test_create_group_snapshot_cg( @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py index 004ad5d3ef9..698c6571552 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py +++ 
b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -194,6 +194,18 @@ def _volume_get(context, volume_id): "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -216,6 +228,7 @@ def _volume_get(context, volume_id): "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { @@ -495,8 +508,7 @@ def test_do_setup(self, brick_get_connector_properties, request): self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(6, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -526,8 +538,7 @@ def test_do_setup_create_hg(self, brick_get_connector_properties, request): self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -557,8 +568,7 @@ def test_do_setup_create_hg_format( self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -631,24 +641,31 @@ def test_create_snapshot( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(4, request.call_count) + @mock.patch.object(requests.Session, "request") + def 
test_delete_snapshot_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT) + self.driver.delete_snapshot(TEST_SNAPSHOT[0]) + self.assertEqual(1, request.call_count) + @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_create_cloned_volume( @@ -863,14 +880,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1034,7 +1049,8 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -1042,7 +1058,7 @@ def test_create_group_snapshot_non_cg( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1061,6 +1077,7 @@ def test_create_group_snapshot_cg( is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1072,7 +1089,7 @@ def test_create_group_snapshot_cg( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/__init__.py b/cinder/tests/unit/volume/drivers/hpe/xp/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py index 1ba2cae68fe..4e2d4f83e9b 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022, 2023, Hewlett Packard Enterprise, Ltd. +# Copyright (C) 2022, 2024, Hewlett Packard Enterprise, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -187,6 +187,7 @@ def _volume_get(context, volume_id): "attributes": ["CVS", "THP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -204,11 +205,29 @@ def _volume_get(context, volume_id): ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP", "FS"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP", "FS"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOL_RESULT = { @@ -578,6 +597,9 @@ def test_do_setup_create_hg(self, brick_get_connector_properties, request): self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) + self.assertEqual(CONFIG_MAP['host_grp_name'], + drv.common.format_info['group_name_format'].format( + wwn=min(DEFAULT_CONNECTOR['wwpns']))) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(8, request.call_count) # stop the Loopingcall within the do_setup treatment @@ -698,17 +720,18 @@ def test_create_snapshot(self, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), @@ -724,7 +747,7 @@ def test_delete_snapshot(self, request): @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -945,14 +968,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support 
unmange_snapshot.""" @@ -1089,14 +1110,15 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1112,6 +1134,7 @@ def test_create_group_snapshot_cg( self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1122,7 +1145,7 @@ def test_create_group_snapshot_cg( ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1133,7 +1156,7 @@ def test_create_group_snapshot_cg( @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py index f6f4f84e755..7173edc42f2 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022, 2023. Hewlett Packard Enterprise, Ltd. +# Copyright (C) 2022, 2024, Hewlett Packard Enterprise, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -190,6 +190,7 @@ def _volume_get(context, volume_id): "attributes": ["CVS", "THP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -207,11 +208,21 @@ def _volume_get(context, volume_id): ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP", "FS"], "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { @@ -490,6 +501,9 @@ def test_do_setup_create_hg(self, brick_get_connector_properties, request): 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort']}}, drv.common.storage_info['portals']) + self.assertEqual(CONFIG_MAP['host_grp_name'], + drv.common.format_info['group_name_format'].format( + ip=DEFAULT_CONNECTOR['ip'])) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(8, request.call_count) # stop the Loopingcall within the do_setup treatment @@ -545,17 +559,18 @@ def test_create_snapshot(self, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -758,14 +773,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -896,14 +909,15 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( 
{'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -919,6 +933,7 @@ def test_create_group_snapshot_cg( self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -929,7 +944,7 @@ def test_create_group_snapshot_cg( ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py index 23aef4da74d..0cc49dd31e1 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py @@ -1,4 +1,4 @@ -# Copyright (C) 2021, 2023, NEC corporation +# Copyright (C) 2021, 2024, NEC corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -187,6 +187,7 @@ def _volume_get(context, volume_id): "attributes": ["CVS", "DP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -204,11 +205,29 @@ def _volume_get(context, volume_id): ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP", "SS"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP", "SS"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_SNAPSHOTS_RESULT = { @@ -446,10 +465,9 @@ def _setup_config(self): self.configuration.nec_v_lun_retry_interval = ( hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.nec_v_restore_timeout = hbsd_rest._RESTORE_TIMEOUT - self.configuration.nec_v_state_transition_timeout = ( - hbsd_rest._STATE_TRANSITION_TIMEOUT) + self.configuration.nec_v_state_transition_timeout = 2 self.configuration.nec_v_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT - self.configuration.nec_v_rest_timeout = hbsd_rest_api._REST_TIMEOUT + self.configuration.nec_v_rest_timeout = 3 self.configuration.nec_v_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.nec_v_exec_retry_interval = ( @@ -692,17 +710,18 @@ def test_create_snapshot(self, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) 
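All of the call-count bumps in these driver test files follow the same mechanics: each FakeResponse queued in side_effect is consumed by exactly one REST request, and the drivers now issue one extra request to label the new LDEV after creating a snapshot. A minimal, self-contained sketch of that mock behaviour, using only unittest.mock (FakeResponse and the annotated call sequence are illustrative stand-ins, not the real driver flow):

from unittest import mock

class FakeResponse:
    def __init__(self, status_code, body):
        self.status_code = status_code
        self.body = body

session = mock.Mock()
# One queued response per expected request; the fifth entry models the
# additional label-setting call added by this change.
session.request.side_effect = [
    FakeResponse(200, {'ldev': 1}),        # look up the source LDEV
    FakeResponse(202, {}),                 # create the snapshot LDEV
    FakeResponse(202, {}),                 # create the TI pair
    FakeResponse(200, {'snapshots': []}),  # poll the snapshot status
    FakeResponse(202, {}),                 # set the LDEV label (new)
]

for _ in range(5):
    session.request('GET', 'https://storage/rest')
assert session.request.call_count == 5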
@mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), @@ -718,7 +737,7 @@ def test_delete_snapshot(self, request): @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -939,14 +958,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1083,14 +1100,15 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1106,6 +1124,7 @@ def test_create_group_snapshot_cg( self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1116,7 +1135,7 @@ def test_create_group_snapshot_cg( ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1127,7 +1146,7 @@ def test_create_group_snapshot_cg( @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py index 2c76e2a5172..b1ade2db115 100644 --- 
a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py @@ -1,4 +1,4 @@ -# Copyright (C) 2021, 2023, NEC corporation +# Copyright (C) 2021, 2024, NEC corporation # # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -191,6 +191,7 @@ def _volume_get(context, volume_id): "attributes": ["CVS", "DP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -208,11 +209,29 @@ def _volume_get(context, volume_id): ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP", "SS"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP", "SS"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { @@ -584,17 +603,18 @@ def test_create_snapshot(self, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -797,14 +817,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -935,14 +953,15 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, 
request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -958,6 +977,7 @@ def test_create_group_snapshot_cg( self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -968,7 +988,7 @@ def test_create_group_snapshot_cg( ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -979,7 +999,7 @@ def test_create_group_snapshot_cg( @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/tests/unit/volume/drivers/test_datera.py b/cinder/tests/unit/volume/drivers/test_datera.py index de7eb4f5c52..1d28a3200c3 100644 --- a/cinder/tests/unit/volume/drivers/test_datera.py +++ b/cinder/tests/unit/volume/drivers/test_datera.py @@ -20,7 +20,6 @@ from cinder import context from cinder import exception from cinder.tests.unit import test -from cinder import version from cinder.volume import configuration as conf from cinder.volume import volume_types @@ -430,21 +429,13 @@ def test_get_manageable_volumes(self): offset = mock.MagicMock() sort_keys = mock.MagicMock() sort_dirs = mock.MagicMock() - if (version.version_string() >= '15.0.0'): - with mock.patch( - 'cinder.volume.volume_utils.paginate_entries_list') \ - as mpage: - self.driver.get_manageable_volumes( - [testvol], marker, limit, offset, sort_keys, sort_dirs) - mpage.assert_called_once_with( - [v1, v2], marker, limit, offset, sort_keys, sort_dirs) - else: - with mock.patch( - 'cinder.volume.utils.paginate_entries_list') as mpage: - self.driver.get_manageable_volumes( - [testvol], marker, limit, offset, sort_keys, sort_dirs) - mpage.assert_called_once_with( - [v1, v2], marker, limit, offset, sort_keys, sort_dirs) + with mock.patch( + 'cinder.volume.volume_utils.paginate_entries_list') \ + as mpage: + self.driver.get_manageable_volumes( + [testvol], marker, limit, offset, sort_keys, sort_dirs) + mpage.assert_called_once_with( + [v1, v2], marker, limit, offset, sort_keys, sort_dirs) def test_unmanage(self): testvol = _stub_volume() diff --git a/cinder/tests/unit/volume/drivers/test_pure.py b/cinder/tests/unit/volume/drivers/test_pure.py index ff329a47809..9b6fa708f00 100644 --- a/cinder/tests/unit/volume/drivers/test_pure.py +++ b/cinder/tests/unit/volume/drivers/test_pure.py @@ -250,6 +250,7 @@ def _decorator(f): ISCSI_IPS[2] + ":" + TARGET_PORT, ISCSI_IPS[3] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } ISCSI_CONNECTION_INFO_V6 = { @@ -264,6 +265,7 @@ def _decorator(f): ISCSI_IPS[6] + ":" + TARGET_PORT, ISCSI_IPS[7] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } ISCSI_CONNECTION_INFO_AC = { @@ -285,6 +287,7 @@ def _decorator(f): AC_ISCSI_IPS[2] + ":" + TARGET_PORT, AC_ISCSI_IPS[3] + ":" + TARGET_PORT], "wwn": 
"3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } ISCSI_CONNECTION_INFO_AC_FILTERED = { @@ -307,6 +310,7 @@ def _decorator(f): AC_ISCSI_IPS[1] + ":" + TARGET_PORT, AC_ISCSI_IPS[2] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } ISCSI_CONNECTION_INFO_AC_FILTERED_LIST = { @@ -324,6 +328,7 @@ def _decorator(f): AC_ISCSI_IPS[5] + ":" + TARGET_PORT, # IPv6 AC_ISCSI_IPS[6] + ":" + TARGET_PORT], # IPv6 "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } @@ -411,6 +416,7 @@ def _decorator(f): "initiator_target_map": INITIATOR_TARGET_MAP, "discard": True, "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } FC_CONNECTION_INFO_AC = { @@ -424,6 +430,7 @@ def _decorator(f): "initiator_target_map": AC_INITIATOR_TARGET_MAP, "discard": True, "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } PURE_SNAPSHOT = { diff --git a/cinder/tests/unit/volume/drivers/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py index f1ffeb89ee4..9952f8a722d 100644 --- a/cinder/tests/unit/volume/drivers/test_rbd.py +++ b/cinder/tests/unit/volume/drivers/test_rbd.py @@ -193,6 +193,7 @@ def _make_configuration(cls, conf_in=None): cfg.rados_connection_interval = 5 cfg.backup_use_temp_snapshot = False cfg.enable_deferred_deletion = False + cfg.rbd_concurrent_flatten_operations = 3 # Because the mocked conf doesn't actually have an underlying oslo conf # it doesn't have the set_default method, so we use a fake one. @@ -792,15 +793,15 @@ def test_delete_volume(self): self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) - (self.driver.rbd.Image.return_value - .list_snaps.assert_called_once_with()) + self.mock_proxy.return_value.__enter__.return_value.\ + list_snaps.assert_called_once_with() client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( @@ -827,16 +828,16 @@ def test_delete_volume_clone_info_return_parent(self): self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) - m_del_clone_parent_refs.assert_called_once() - (self.driver.rbd.Image.return_value - .list_snaps.assert_called_once_with()) + m_del_clone_parent_refs.assert_not_called() + self.mock_proxy.return_value.__enter__.return_value.list_snaps.\ + assert_called_once_with() client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) m_del_back_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( @@ -859,18 +860,17 @@ def test_deferred_deletion(self): drv.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - drv.rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) - drv.rbd.Image.return_value.list_snaps.assert_called_once_with() client.__enter__.assert_called_once_with() 
client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( - drv.rbd.Image.return_value) + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( drv.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( - 1, drv.rbd.RBD.return_value.trash_move.call_count) + 0, drv.rbd.RBD.return_value.trash_move.call_count) self.driver.rbd.RBD.return_value.remove.assert_not_called() @common_mocks @@ -926,7 +926,7 @@ def test_deferred_deletion_w_parent(self): drv.delete_volume(self.volume_a) self.assertEqual( - 1, drv.rbd.RBD.return_value.trash_move.call_count) + 0, drv.rbd.RBD.return_value.trash_move.call_count) @common_mocks def test_deferred_deletion_w_deleted_parent(self): @@ -940,16 +940,18 @@ def test_deferred_deletion_w_deleted_parent(self): drv.delete_volume(self.volume_a) self.assertEqual( - 2, drv.rbd.RBD.return_value.trash_move.call_count) + 0, drv.rbd.RBD.return_value.trash_move.call_count) @common_mocks def test_delete_volume_not_found_at_open(self): self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound + self.mock_proxy.side_effect = self.mock_rbd.ImageNotFound self.assertIsNone(self.driver.delete_volume(self.volume_a)) with mock.patch.object(driver, 'RADOSClient') as client: client = self.mock_client.return_value.__enter__.return_value - self.mock_rbd.Image.assert_called_once_with(client.ioctx, - self.volume_a.name) + self.mock_proxy.assert_called_once_with(self.driver, + self.volume_a.name, + ioctx=client.ioctx) # Make sure the exception was raised self.assertEqual([self.mock_rbd.ImageNotFound], RAISED_EXCEPTIONS) @@ -958,45 +960,103 @@ def test_delete_busy_volume(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( - self.mock_rbd.ImageBusy) - - mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info') - mock_get_clone_info.return_value = (None, None, None) + self.mock_rbd.ImageBusy, + None) mock_delete_backup_snaps = self.mock_object(self.driver, '_delete_backup_snaps') mock_rados_client = self.mock_object(driver, 'RADOSClient') + mock_flatten = self.mock_object(self.driver, '_flatten') + + with mock.patch.object(self.driver, '_get_clone_info') as \ + mock_get_clone_info: + mock_get_clone_info.return_value = (None, None, None) + self.driver.rbd.Image.return_value.list_children.\ + return_value = [('pool1', 'child1'), + ('pool1', 'child2')] + self.mock_proxy.return_value.__enter__.return_value.list_children.\ + return_value = [('pool1', 'child1'), ('pool1', 'child2')] + self.driver.delete_volume(self.volume_a) + + mock_flatten.assert_has_calls( + [mock.call('pool1', 'child1'), + mock.call('pool1', 'child2')]) + + mock_get_clone_info.assert_called_once_with( + self.mock_proxy.return_value.__enter__.return_value, + self.volume_a.name, + None) + self.mock_proxy.return_value.__enter__.return_value.list_snaps.\ + assert_called_once_with() + mock_rados_client.assert_called_once_with(self.driver) + mock_delete_backup_snaps.assert_called_once_with( + self.mock_proxy.return_value.__enter__.return_value) + self.assertFalse( + self.mock_rbd.Image.return_value.unprotect_snap. 
+ called) + self.assertEqual( + 2, + self.mock_rbd.RBD.return_value.remove.call_count) + self.assertEqual(1, len(RAISED_EXCEPTIONS)) + # Make sure the exception was raised + self.assertIn(self.mock_rbd.ImageBusy, + RAISED_EXCEPTIONS) + + self.mock_rbd.RBD.return_value.trash_move.assert_not_called() + + @common_mocks + def test_delete_volume_has_snapshots(self): + self.mock_rbd.Image.return_value.list_snaps.return_value = [] + + self.mock_rbd.RBD.return_value.remove.side_effect = ( + self.mock_rbd.ImageHasSnapshots, # initial vol remove attempt + None # removal of child image + ) + mock_get_clone_info = self.mock_object(self.driver, + '_get_clone_info', + return_value=(None, + None, + None)) + m_del_backup_snaps = self.mock_object(self.driver, + '_delete_backup_snaps') + + mock_try_remove_volume = self.mock_object(self.driver, + '_try_remove_volume', + return_value=True) + + mock_rados_client = self.mock_object(driver, 'RADOSClient') self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) - self.mock_rbd.Image.return_value.list_snaps.assert_called_once_with() mock_rados_client.assert_called_once_with(self.driver) - mock_delete_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + m_del_backup_snaps.assert_called_once_with( + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.mock_rbd.RBD.return_value.remove.call_count) self.assertEqual(1, len(RAISED_EXCEPTIONS)) # Make sure the exception was raised - self.assertIn(self.mock_rbd.ImageBusy, RAISED_EXCEPTIONS) + self.assertIn(self.mock_rbd.ImageHasSnapshots, + RAISED_EXCEPTIONS) - self.mock_rbd.RBD.return_value.trash_move.\ - assert_called_once_with( - mock.ANY, - self.volume_a.name, - 0) + self.mock_rbd.RBD.return_value.trash_move.assert_not_called() + + mock_try_remove_volume.assert_called_once_with(mock.ANY, + self.volume_a.name) @common_mocks - def test_delete_volume_has_snapshots(self): + def test_delete_volume_has_snapshots_trash(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( - self.mock_rbd.ImageHasSnapshots) + self.mock_rbd.ImageHasSnapshots, # initial vol remove attempt + None # removal of child image + ) mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info', return_value=(None, @@ -1005,18 +1065,25 @@ def test_delete_volume_has_snapshots(self): m_del_backup_snaps = self.mock_object(self.driver, '_delete_backup_snaps') + mock_try_remove_volume = self.mock_object(self.driver, + '_try_remove_volume', + return_value=False) + + mock_trash_volume = self.mock_object(self.driver, + '_move_volume_to_trash') + with mock.patch.object(driver, 'RADOSClient') as mock_rados_client: self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) - (self.mock_rbd.Image.return_value.list_snaps - .assert_called_once_with()) + self.mock_proxy.return_value.__enter__.return_value.list_snaps.\ + assert_called_once_with() mock_rados_client.assert_called_once_with(self.driver) m_del_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( 
self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( @@ -1027,10 +1094,14 @@ def test_delete_volume_has_snapshots(self): RAISED_EXCEPTIONS) self.mock_rbd.RBD.return_value.trash_move.\ - assert_called_once_with( - mock.ANY, - self.volume_a.name, - 0) + assert_not_called() + + mock_trash_volume.assert_called_once_with(mock.ANY, + self.volume_a.name, + 0) + + mock_try_remove_volume.assert_called_once_with(mock.ANY, + self.volume_a.name) @common_mocks def test_delete_volume_not_found(self): @@ -1046,15 +1117,20 @@ def test_delete_volume_not_found(self): mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info') mock_get_clone_info.return_value = (None, None, None) + mock_find_clone_snap = self.mock_object(self.driver, + '_find_clone_snap', + return_value=None) + self.assertIsNone(self.driver.delete_volume(self.volume_a)) + image = self.mock_proxy.return_value.__enter__.return_value mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + image, self.volume_a.name, None) - self.mock_rbd.Image.return_value.list_snaps.assert_called_once_with() + mock_find_clone_snap.assert_called_once_with(image) mock_rados_client.assert_called_once_with(self.driver) - mock_delete_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + mock_delete_backup_snaps.assert_called_once_with(image) + self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( @@ -1083,21 +1159,22 @@ def test_delete_volume_w_clone_snaps(self): return_value=(None, None, None)) + + self.mock_object(self.driver, '_find_clone_snap', + return_value=snapshots[2]['name']) with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, snapshots[2]['name']) - (self.driver.rbd.Image.return_value - .list_snaps.assert_called_once_with()) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( @@ -1281,26 +1358,40 @@ def test_delete_busy_snapshot(self, volume_get_by_id): proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = ( - self.mock_rbd.ImageBusy) + self.mock_rbd.ImageBusy, + None) - with mock.patch.object(self.driver, '_get_children_info') as \ - mock_get_children_info: - mock_get_children_info.return_value = [('pool', 'volume2')] + with mock.patch.object(self.driver, '_flatten_children') as \ + mock_flatten_children: + self.driver.delete_snapshot(self.snapshot) - with mock.patch.object(driver, 'LOG') as \ - mock_log: + mock_flatten_children.assert_called_once_with(mock.ANY, + self.volume_a.name, + self.snapshot.name) - self.assertRaises(exception.SnapshotIsBusy, - self.driver.delete_snapshot, - self.snapshot) + self.assertTrue(proxy.unprotect_snap.called) + self.assertTrue(proxy.remove_snap.called) - mock_get_children_info.assert_called_once_with( - proxy, - self.snapshot.name) + @common_mocks + @mock.patch.object(driver.RBDDriver, '_flatten') + @mock.patch('cinder.objects.Volume.get_by_id') + def test_delete_busy_snapshot_fail(self, volume_get_by_id, flatten_mock): + volume_get_by_id.return_value = self.volume_a + proxy = 
self.mock_proxy.return_value + proxy.__enter__.return_value = proxy - self.assertTrue(mock_log.info.called) - self.assertTrue(proxy.unprotect_snap.called) - self.assertFalse(proxy.remove_snap.called) + proxy.unprotect_snap.side_effect = ( + self.mock_rbd.ImageBusy, + self.mock_rbd.ImageBusy, + self.mock_rbd.ImageBusy) + flatten_mock.side_effect = exception.SnapshotIsBusy(self.snapshot.name) + + self.assertRaises(exception.SnapshotIsBusy, + self.driver.delete_snapshot, + self.snapshot) + + self.assertTrue(proxy.unprotect_snap.called) + self.assertFalse(proxy.remove_snap.called) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') @@ -1325,19 +1416,6 @@ def test_revert_to_snapshot(self): self.snapshot) image.rollback_to_snap.assert_called_once_with(self.snapshot.name) - @common_mocks - def test_get_children_info(self): - volume = self.mock_proxy - volume.set_snap = mock.Mock() - volume.list_children = mock.Mock() - list_children = [('pool', 'volume2')] - volume.list_children.return_value = list_children - - info = self.driver._get_children_info(volume, - self.snapshot['name']) - - self.assertEqual(list_children, info) - @common_mocks def test_get_clone_info(self): volume = self.mock_rbd.Image() diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py index 84404c028f0..2ff27564b27 100644 --- a/cinder/volume/driver.py +++ b/cinder/volume/driver.py @@ -2780,7 +2780,8 @@ def initialize_connection(self, volume, connector): If the backend driver supports multiple connections for multipath and for single path with failover, "target_portals", "target_iqns", - "target_luns" are also populated:: + "target_luns" are also populated. In this example also LUN values + greater than 255 use flat addressing mode:: { 'driver_volume_type': 'iscsi', @@ -2795,6 +2796,7 @@ def initialize_connection(self, volume, connector): 'target_luns': [1, 1], 'volume_id': 1, 'discard': False, + 'addressing_mode': os_brick.constants.SCSI_ADDRESSING_SAM2, } } """ @@ -2940,6 +2942,7 @@ def initialize_connection(self, volume, connector): 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], 'discard': False, + 'addressing_mode': os_brick.constants.SCSI_ADDRESSING_SAM2, } } diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py index a6847a220fc..d0dbd8edcff 100644 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ b/cinder/volume/drivers/hitachi/hbsd_common.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
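The 'addressing_mode' key added to the initialize_connection examples above is a hint that os-brick consumes when building the SCSI address on the attaching host; per the updated docstring, SAM-2 flat addressing covers LUN values greater than 255. A minimal sketch of the returned structure (field values are illustrative, only the addressing_mode entry is the point, and it assumes os-brick is installed):

from os_brick import constants as brick_constants

connection_info = {
    'driver_volume_type': 'fibre_channel',
    'data': {
        'target_discovered': True,
        'target_lun': 256,  # above 255, flat (SAM-2) addressing is needed
        'target_wwn': ['1234567890123', '0987654321321'],
        'discard': False,
        # New hint consumed by os-brick when attaching the volume:
        'addressing_mode': brick_constants.SCSI_ADDRESSING_SAM2,
    },
}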
You may obtain @@ -14,6 +14,7 @@ # """Common module for Hitachi HBSD Driver.""" +from collections import defaultdict import json import re @@ -29,8 +30,6 @@ from cinder.volume import volume_types from cinder.volume import volume_utils -_GROUP_NAME_FORMAT_DEFAULT_FC = utils.TARGET_PREFIX + '{wwn}' -_GROUP_NAME_FORMAT_DEFAULT_ISCSI = utils.TARGET_PREFIX + '{ip}' _GROUP_NAME_MAX_LEN_FC = 64 _GROUP_NAME_MAX_LEN_ISCSI = 32 @@ -49,6 +48,8 @@ STR_VOLUME = 'volume' STR_SNAPSHOT = 'snapshot' +_UUID_PATTERN = re.compile(r'^[\da-f]{32}$') + _INHERITED_VOLUME_OPTS = [ 'volume_backend_name', 'volume_driver', @@ -147,27 +148,6 @@ help='Format of host groups, iSCSI targets, and server objects.'), ] -_GROUP_NAME_FORMAT = { - 'FC': { - 'group_name_max_len': _GROUP_NAME_MAX_LEN_FC, - 'group_name_var_cnt': { - GROUP_NAME_VAR_WWN: [1], - GROUP_NAME_VAR_IP: [0], - GROUP_NAME_VAR_HOST: [0, 1], - }, - 'group_name_format_default': _GROUP_NAME_FORMAT_DEFAULT_FC, - }, - 'iSCSI': { - 'group_name_max_len': _GROUP_NAME_MAX_LEN_ISCSI, - 'group_name_var_cnt': { - GROUP_NAME_VAR_WWN: [0], - GROUP_NAME_VAR_IP: [1], - GROUP_NAME_VAR_HOST: [0, 1], - }, - 'group_name_format_default': _GROUP_NAME_FORMAT_DEFAULT_ISCSI, - } -} - CONF = cfg.CONF CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(COMMON_PORT_OPTS, group=configuration.SHARED_CONF_GROUP) @@ -217,7 +197,28 @@ def __init__(self, conf, driverinfo, db): 'portals': {}, } self.storage_id = None - self.group_name_format = _GROUP_NAME_FORMAT[driverinfo['proto']] + if self.storage_info['protocol'] == 'FC': + self.group_name_format = { + 'group_name_max_len': _GROUP_NAME_MAX_LEN_FC, + 'group_name_var_cnt': { + GROUP_NAME_VAR_WWN: [1], + GROUP_NAME_VAR_IP: [0], + GROUP_NAME_VAR_HOST: [0, 1], + }, + 'group_name_format_default': self.driver_info[ + 'target_prefix'] + '{wwn}', + } + if self.storage_info['protocol'] == 'iSCSI': + self.group_name_format = { + 'group_name_max_len': _GROUP_NAME_MAX_LEN_ISCSI, + 'group_name_var_cnt': { + GROUP_NAME_VAR_WWN: [0], + GROUP_NAME_VAR_IP: [1], + GROUP_NAME_VAR_HOST: [0, 1], + }, + 'group_name_format_default': self.driver_info[ + 'target_prefix'] + '{ip}', + } self.format_info = { 'group_name_format': self.group_name_format[ 'group_name_format_default'], @@ -348,13 +349,19 @@ def delete_pair_based_on_svol(self, pvol, svol_info): """Disconnect all volume pairs to which the specified S-VOL belongs.""" raise NotImplementedError() - def get_pair_info(self, ldev): + def get_pair_info(self, ldev, ldev_info=None): """Return volume pair info(LDEV number, pair status and pair type).""" raise NotImplementedError() - def delete_pair(self, ldev): - """Disconnect all volume pairs to which the specified LDEV belongs.""" - pair_info = self.get_pair_info(ldev) + def delete_pair(self, ldev, ldev_info=None): + """Disconnect all volume pairs to which the specified LDEV belongs. + + :param int ldev: The ID of the LDEV whose TI pair needs be deleted + :param dict ldev_info: LDEV info + :return: None + :raises VolumeDriverException: if the LDEV is a P-VOL of a TI pair + """ + pair_info = self.get_pair_info(ldev, ldev_info) if not pair_info: return if pair_info['pvol'] == ldev: @@ -385,12 +392,57 @@ def delete_ldev_from_storage(self, ldev): """Delete the specified LDEV from the storage.""" raise NotImplementedError() - def delete_ldev(self, ldev): - """Delete the specified LDEV.""" - self.delete_pair(ldev) + def delete_ldev(self, ldev, ldev_info=None): + """Delete the specified LDEV. 
+ + :param int ldev: The ID of the LDEV to be deleted + :param dict ldev_info: LDEV info + :return: None + """ + self.delete_pair(ldev, ldev_info) self.unmap_ldev_from_storage(ldev) self.delete_ldev_from_storage(ldev) + def is_invalid_ldev(self, ldev, obj, ldev_info_): + """Check if the specified LDEV corresponds to the specified object. + + If the LDEV label and the object's id or name_id do not match, the LDEV + was deleted and another LDEV with the same ID was created for another + volume or snapshot. In this case, we say that the LDEV is invalid. + If the LDEV label is not set or its format is unexpected, we cannot + judge if the LDEV corresponds to the object. This can happen if the + LDEV was created in older versions of this product or if the user + overwrote the label. In this case, we treat the LDEV as not + invalid, although we cannot be completely sure about it. + We compare name_id rather than id for volumes because host-assisted + migration changes the id of the volume that corresponds to the LDEV + but not its name_id, and because the LDEV label is created from the + id of the volume when the LDEV is created and is never changed after + that. + Because Snapshot objects do not have name_id, we use id instead of + name_id if the object is a Snapshot. We assume that the object is a + Snapshot object if hasattr(obj, 'name_id') returns False. + This method returns False if the LDEV does not exist on the storage. + The absence of the LDEV on the storage is detected elsewhere. + :param int ldev: The ID of the LDEV to be checked + :param obj: The object to be checked + :type obj: Volume or Snapshot + :param dict ldev_info_: LDEV info. This is an output parameter: data is + written into it by this method, but the dict itself must be allocated + by the caller. + :return: True if the LDEV does not correspond to the object, False + otherwise + :rtype: bool + """ + ldev_info = self.get_ldev_info(None, ldev) + # To avoid calling the same REST API multiple times, we pass the LDEV + # info to the caller. + ldev_info_.update(ldev_info) + return ('label' in ldev_info + and _UUID_PATTERN.match(ldev_info['label']) + and ldev_info['label'] != ( + obj.name_id if hasattr(obj, 'name_id') else + obj.id).replace('-', '')) + def delete_volume(self, volume): """Delete the specified volume.""" ldev = self.get_ldev(volume) @@ -399,10 +451,20 @@ def delete_volume(self, volume): MSG.INVALID_LDEV_FOR_DELETION, method='delete_volume', id=volume['id']) return + # Check if the LDEV corresponds to the volume. + # To avoid KeyError when accessing a missing attribute, set the default + # value to None. + ldev_info = defaultdict(lambda: None) + if self.is_invalid_ldev(ldev, volume, ldev_info): + # If the LDEV is assigned to another object, skip deleting it.
+ self.output_log(MSG.SKIP_DELETING_LDEV, obj='volume', + obj_id=volume.id, ldev=ldev, + ldev_label=ldev_info['label']) + return try: - self.delete_ldev(ldev) + self.delete_ldev(ldev, ldev_info) except exception.VolumeDriverException as ex: - if ex.msg == utils.BUSY_MESSAGE: + if utils.BUSY_MESSAGE in ex.msg: raise exception.VolumeIsBusy(volume_name=volume['name']) else: raise ex @@ -424,6 +486,7 @@ def create_snapshot(self, snapshot): new_ldev = self.copy_on_storage( ldev, size, extra_specs, pool_id, snap_pool_id, ldev_range, is_snapshot=True) + self.modify_ldev_name(new_ldev, snapshot.id.replace("-", "")) return { 'provider_location': str(new_ldev), } @@ -436,10 +499,20 @@ def delete_snapshot(self, snapshot): MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot', id=snapshot['id']) return + # Check if the LDEV corresponds to the snapshot. + # To avoid KeyError when accessing a missing attribute, set the default + # value to None. + ldev_info = defaultdict(lambda: None) + if self.is_invalid_ldev(ldev, snapshot, ldev_info): + # If the LDEV is assigned to another object, skip deleting it. + self.output_log(MSG.SKIP_DELETING_LDEV, obj='snapshot', + obj_id=snapshot.id, ldev=ldev, + ldev_label=ldev_info['label']) + return try: - self.delete_ldev(ldev) + self.delete_ldev(ldev, ldev_info) except exception.VolumeDriverException as ex: - if ex.msg == utils.BUSY_MESSAGE: + if utils.BUSY_MESSAGE in ex.msg: raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) else: raise ex @@ -594,7 +667,7 @@ def unmanage(self, volume): try: self.delete_pair(ldev) except exception.VolumeDriverException as ex: - if ex.msg == utils.BUSY_MESSAGE: + if utils.BUSY_MESSAGE in ex.msg: raise exception.VolumeIsBusy(volume_name=volume['name']) else: raise ex @@ -690,7 +763,8 @@ def _check_param_group_name_format(self): if self.conf.hitachi_group_name_format is not None: error_flag = False if re.match( - utils.TARGET_PREFIX + '(' + GROUP_NAME_VAR_WWN + '|' + + self.driver_info['target_prefix'] + '(' + + GROUP_NAME_VAR_WWN + '|' + GROUP_NAME_VAR_IP + '|' + GROUP_NAME_VAR_HOST + '|' + '[' + GROUP_NAME_ALLOWED_CHARS + '])+$', self.conf.hitachi_group_name_format) is None: @@ -1103,6 +1177,11 @@ def migrate_volume(self, volume, host): """Migrate the specified volume.""" return False + def update_migrated_volume(self, new_volume): + """Return model update for migrated volume.""" + return {'_name_id': new_volume.name_id, + 'provider_location': new_volume.provider_location} + def retype(self, ctxt, volume, new_type, diff, host): """Retype the specified volume.""" return False @@ -1158,7 +1237,11 @@ def get_ldev(self, obj, both=False): provider_location = obj.get('provider_location') if not provider_location: return None - if provider_location.isdigit(): + if provider_location.isdigit() and not getattr(self, 'is_secondary', + False): + # This format implies that the value is the ID of an LDEV in the + # primary storage. Therefore, the secondary instance should not + # retrieve this value. return int(provider_location) if provider_location.startswith('{'): loc = json.loads(provider_location) diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py index e4233ea1353..9afb63ed74c 100644 --- a/cinder/volume/drivers/hitachi/hbsd_fc.py +++ b/cinder/volume/drivers/hitachi/hbsd_fc.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. 
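The guard above can be exercised in isolation. A minimal sketch of the stale-LDEV check, assuming a plain dict in place of the driver's REST-backed get_ldev_info: the LDEV label is a 32-hex-digit copy of the owning object's UUID, so a well-formed label that differs from the object's UUID means the LDEV ID was recycled for another volume or snapshot.

from collections import defaultdict
import re

_UUID_PATTERN = re.compile(r'^[\da-f]{32}$')

def is_invalid_ldev(label, obj_uuid):
    # Unset or hand-edited labels cannot be judged; treat as valid.
    if not label or not _UUID_PATTERN.match(label):
        return False
    return label != obj_uuid.replace('-', '')

# defaultdict(lambda: None) mirrors how the patch avoids KeyError when an
# attribute such as 'label' is missing from the REST response.
ldev_info = defaultdict(lambda: None)
print(is_invalid_ldev(ldev_info['label'], 'any-uuid'))  # False: no label set

ldev_info['label'] = '1' * 32
print(is_invalid_ldev(ldev_info['label'],
                      '11111111-1111-1111-1111-111111111111'))  # False: match
print(is_invalid_ldev(ldev_info['label'],
                      '22222222-2222-2222-2222-222222222222'))  # True: recycled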
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -191,8 +191,7 @@ def update_migrated_volume( self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" self.common.discard_zero_page(new_volume) - super(HBSDFCDriver, self).update_migrated_volume( - ctxt, volume, new_volume, original_volume_status) + return self.common.update_migrated_volume(new_volume) @volume_utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id, diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py index 95ac706c0f7..8bcfe6b38cd 100644 --- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py +++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -187,8 +187,7 @@ def update_migrated_volume( self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" self.common.discard_zero_page(new_volume) - super(HBSDISCSIDriver, self).update_migrated_volume( - ctxt, volume, new_volume, original_volume_status) + return self.common.update_migrated_volume(new_volume) @volume_utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id, diff --git a/cinder/volume/drivers/hitachi/hbsd_replication.py b/cinder/volume/drivers/hitachi/hbsd_replication.py index b296d853730..3cef57aefce 100644 --- a/cinder/volume/drivers/hitachi/hbsd_replication.py +++ b/cinder/volume/drivers/hitachi/hbsd_replication.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022, 2023, Hitachi, Ltd. +# Copyright (C) 2022, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -14,6 +14,7 @@ # """replication module for Hitachi HBSD Driver.""" +from collections import defaultdict import json from eventlet import greenthread @@ -237,7 +238,15 @@ def update_mirror_conf(self, conf, opts): for opt in opts: name = opt.name.replace('hitachi_mirror_', 'hitachi_') try: - setattr(conf, name, getattr(conf, opt.name)) + if opt.name == 'hitachi_mirror_pool': + if conf.safe_get('hitachi_mirror_pool'): + name = 'hitachi_pools' + value = [getattr(conf, opt.name)] + else: + raise ValueError() + else: + value = getattr(conf, opt.name) + setattr(conf, name, value) except Exception: with excutils.save_and_reraise_exception(): self.rep_secondary.output_log( @@ -554,16 +563,32 @@ def create_volume(self, volume): } return self.rep_primary.create_volume(volume) - def _has_rep_pair(self, ldev): - ldev_info = self.rep_primary.get_ldev_info( - ['status', 'attributes'], ldev) + def _has_rep_pair(self, ldev, ldev_info=None): + """Return if the specified LDEV has a replication pair. 
+ + :param int ldev: The LDEV ID + :param dict ldev_info: LDEV info + :return: True if the LDEV status is normal and the LDEV has a + replication pair, False otherwise + :rtype: bool + """ + if ldev_info is None: + ldev_info = self.rep_primary.get_ldev_info( + ['status', 'attributes'], ldev) return (ldev_info['status'] == rest.NORMAL_STS and self.driver_info['mirror_attr'] in ldev_info['attributes']) - def _get_rep_pair_info(self, pldev): - """Return replication pair info.""" + def _get_rep_pair_info(self, pldev, ldev_info=None): + """Return replication pair info. + + :param int pldev: The ID of the LDEV(P-VOL in case of a pair) + :param dict ldev_info: LDEV info + :return: replication pair info. An empty dict if the LDEV does not + have a pair. + :rtype: dict + """ pair_info = {} - if not self._has_rep_pair(pldev): + if not self._has_rep_pair(pldev, ldev_info): return pair_info self._require_rep_secondary() copy_group_name = self._create_rep_copy_group_name(pldev) @@ -601,6 +626,65 @@ def _delete_rep_pair(self, pvol, svol): self.rep_primary.client.delete_remote_copypair( self.rep_secondary.client, copy_group_name, pvol, svol) + def _delete_volume_pre_check(self, volume): + """Pre-check for delete_volume(). + + :param Volume volume: The volume to be checked + :return: svol: The ID of the S-VOL + :rtype: int + :return: pvol_is_invalid: True if P-VOL is invalid, False otherwise + :rtype: bool + :return: svol_is_invalid: True if S-VOL is invalid, False otherwise + :rtype: bool + :return: pair_exists: True if the pair exists, False otherwise + :rtype: bool + """ + # Check if the LDEV in the primary storage corresponds to the volume + pvol_is_invalid = True + # To avoid KeyError when accessing a missing attribute, set the default + # value to None. + pvol_info = defaultdict(lambda: None) + pvol = self.rep_primary.get_ldev(volume) + if pvol is not None: + if self.rep_primary.is_invalid_ldev(pvol, volume, pvol_info): + # If the LDEV is assigned to another object, skip deleting it. + self.rep_primary.output_log( + MSG.SKIP_DELETING_LDEV, obj='volume', obj_id=volume.id, + ldev=pvol, ldev_label=pvol_info['label']) + else: + pvol_is_invalid = False + # Check if the pair exists on the storage. + pair_exists = False + svol_is_invalid = True + svol = None + if not pvol_is_invalid: + pair_info = self._get_rep_pair_info(pvol, pvol_info) + if pair_info: + pair_exists = True + # Because this pair is a valid P-VOL's pair, we need to delete + # it and its LDEVs. The LDEV ID of the S-VOL to be deleted is + # uniquely determined from the pair info. Therefore, there is + # no need to get it from provider_location or to validate the + # S-VOL by comparing the volume ID with the S-VOL's label. + svol = pair_info['svol_info'][0]['ldev'] + svol_is_invalid = False + # Check if the LDEV in the secondary storage corresponds to the volume + if svol_is_invalid: + svol = self.rep_secondary.get_ldev(volume) + if svol is not None: + # To avoid KeyError when accessing a missing attribute, set the + # default value to None. + svol_info = defaultdict(lambda: None) + if self.rep_secondary.is_invalid_ldev(svol, volume, svol_info): + # If the LDEV is assigned to another object, skip deleting + # it. 
+ self.rep_secondary.output_log( + MSG.SKIP_DELETING_LDEV, obj='volume', obj_id=volume.id, + ldev=svol, ldev_label=svol_info['label']) + else: + svol_is_invalid = False + return svol, pvol_is_invalid, svol_is_invalid, pair_exists + def delete_volume(self, volume): """Delete the specified volume.""" self._require_rep_primary() @@ -610,22 +694,34 @@ def delete_volume(self, volume): MSG.INVALID_LDEV_FOR_DELETION, method='delete_volume', id=volume.id) return - pair_info = self._get_rep_pair_info(ldev) - if pair_info: - self._delete_rep_pair( - pair_info['pvol'], pair_info['svol_info'][0]['ldev']) + # Run pre-check. + svol, pvol_is_invalid, svol_is_invalid, pair_exists = ( + self._delete_volume_pre_check(volume)) + # Delete the pair if it exists. + if pair_exists: + self._delete_rep_pair(ldev, svol) + # Delete LDEVs if they are valid. + thread = None + if not svol_is_invalid: thread = greenthread.spawn( self.rep_secondary.delete_volume, volume) - try: + try: + if not pvol_is_invalid: self.rep_primary.delete_volume(volume) - finally: + finally: + if thread is not None: thread.wait() - else: - self.rep_primary.delete_volume(volume) - def delete_ldev(self, ldev): + def delete_ldev(self, ldev, ldev_info=None): + """Delete the specified LDEV[s]. + + :param int ldev: The ID of the LDEV(P-VOL in case of a pair) to be + deleted + :param dict ldev_info: LDEV(P-VOL in case of a pair) info + :return: None + """ self._require_rep_primary() - pair_info = self._get_rep_pair_info(ldev) + pair_info = self._get_rep_pair_info(ldev, ldev_info) if pair_info: self._delete_rep_pair(ldev, pair_info['svol_info'][0]['ldev']) th = greenthread.spawn(self.rep_secondary.delete_ldev, diff --git a/cinder/volume/drivers/hitachi/hbsd_rest.py b/cinder/volume/drivers/hitachi/hbsd_rest.py index ac7eb2c8606..28cdbc4e2c5 100644 --- a/cinder/volume/drivers/hitachi/hbsd_rest.py +++ b/cinder/volume/drivers/hitachi/hbsd_rest.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -522,9 +522,22 @@ def create_pair_on_storage( self._create_clone_pair(pvol, svol, snap_pool_id) def get_ldev_info(self, keys, ldev, **kwargs): - """Return a dictionary of LDEV-related items.""" + """Return a dictionary of LDEV-related items. + + :param keys: LDEV Attributes to be obtained. Specify None to obtain all + LDEV attributes. + :type keys: list or NoneType + :param int ldev: The LDEV ID + :param dict kwargs: REST API options + :return: LDEV info + :rtype: dict + """ d = {} result = self.client.get_ldev(ldev, **kwargs) + if not keys: + # To avoid KeyError when accessing a missing attribute, set the + # default value to None. + return defaultdict(lambda: None, result) for key in keys: d[key] = result.get(key) return d @@ -909,10 +922,17 @@ def _get_copy_pair_info(self, ldev): return pvol, [{'ldev': svol, 'is_psus': is_psus, 'status': status}] - def get_pair_info(self, ldev): - """Return info of the volume pair.""" + def get_pair_info(self, ldev, ldev_info=None): + """Return info of the volume pair. 
+ + :param int ldev: The LDEV ID + :param dict ldev_info: LDEV info + :return: TI pair info if the LDEV has TI pairs, None otherwise + :rtype: dict or NoneType + """ pair_info = {} - ldev_info = self.get_ldev_info(['status', 'attributes'], ldev) + if ldev_info is None: + ldev_info = self.get_ldev_info(['status', 'attributes'], ldev) if (ldev_info['status'] != NORMAL_STS or self.driver_info['pair_attr'] not in ldev_info['attributes']): return None @@ -1278,6 +1298,8 @@ def _create_cgsnapshot_volume(snapshot): extra_specs = self.get_volume_extra_specs(snapshot.volume) pair['svol'] = self.create_ldev(size, extra_specs, pool_id, ldev_range) + self.modify_ldev_name(pair['svol'], + snapshot.id.replace("-", "")) except Exception as exc: pair['msg'] = utils.get_exception_msg(exc) raise loopingcall.LoopingCallDone(pair) diff --git a/cinder/volume/drivers/hitachi/hbsd_utils.py b/cinder/volume/drivers/hitachi/hbsd_utils.py index b5f2ee90af6..7bd33bbf1f0 100644 --- a/cinder/volume/drivers/hitachi/hbsd_utils.py +++ b/cinder/volume/drivers/hitachi/hbsd_utils.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -253,6 +253,14 @@ class HBSDMsg(enum.Enum): 'to be active by the Fibre Channel Zone Manager.', 'suffix': WARNING_SUFFIX, } + SKIP_DELETING_LDEV = { + 'msg_id': 348, + 'loglevel': base_logging.WARNING, + 'msg': 'Skip deleting the LDEV and its LUNs and pairs because the ' + 'LDEV is used by another object. (%(obj)s: %(obj_id)s, LDEV: ' + '%(ldev)s, LDEV label: %(ldev_label)s)', + 'suffix': WARNING_SUFFIX, + } STORAGE_COMMAND_FAILED = { 'msg_id': 600, 'loglevel': base_logging.ERROR, diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py index ed643fe3163..63edaae046f 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_common.py +++ b/cinder/volume/drivers/hpe/hpe_3par_common.py @@ -464,6 +464,7 @@ def do_setup(self, context, timeout=None, stats=None, array_id=None): # case of a fail-over. self._get_3par_config(array_id=array_id) self.client = self._create_client(timeout=timeout) + self.client_login() wsapi_version = self.client.getWsApiVersion() self.API_VERSION = wsapi_version['build'] diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py index 57ee1dcd232..eec960d9883 100644 --- a/cinder/volume/drivers/pure.py +++ b/cinder/volume/drivers/pure.py @@ -24,6 +24,7 @@ import re import uuid +from os_brick import constants as brick_constants from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils @@ -2850,6 +2851,7 @@ def _build_connection_properties(self, targets): "data": { "target_discovered": False, "discard": True, + "addressing_mode": brick_constants.SCSI_ADDRESSING_SAM2, }, } @@ -3090,6 +3092,7 @@ def initialize_connection(self, volume, connector): "target_wwns": target_wwns, "initiator_target_map": init_targ_map, "discard": True, + "addressing_mode": brick_constants.SCSI_ADDRESSING_SAM2, } } properties["data"]["wwn"] = self._get_wwn(pure_vol_name) diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py index 1f4dac8d9fd..237d7591b15 100644 --- a/cinder/volume/drivers/rbd.py +++ b/cinder/volume/drivers/rbd.py @@ -1,4 +1,5 @@ # Copyright 2013 OpenStack Foundation +# Copyright 2022 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -133,6 +134,9 @@ cfg.IntOpt('deferred_deletion_purge_interval', default=60, help='Number of seconds between runs of the periodic task ' 'to purge volumes tagged for deletion.'), + cfg.IntOpt('rbd_concurrent_flatten_operations', default=3, min=0, + help='Number of flatten operations that will run ' + 'concurrently on this volume service.') ] CONF = cfg.CONF @@ -356,6 +360,10 @@ def __init__(self, self.keyring_data: Optional[str] = None self._set_keyring_attributes() + self._semaphore = utils.semaphore_factory( + limit=self.configuration.rbd_concurrent_flatten_operations, + concurrent_processes=1) + def _set_keyring_attributes(self) -> None: # The rbd_keyring_conf option is not available for OpenStack usage # for security reasons (OSSN-0085) and in OpenStack we use @@ -829,6 +837,25 @@ def _extend_if_required(self, volume: Volume, src_vref: Volume) -> None: 'dst_size': volume.size}) self._resize(volume) + def _flatten_volume( + self, + image_name: str, + client: RADOSClient) -> None: + + # Flatten destination volume + try: + with RBDVolumeProxy(self, image_name, client=client, + ioctx=client.ioctx) as dest_volume: + LOG.debug("flattening volume %s", image_name) + dest_volume.flatten() + except Exception as e: + msg = (_("Failed to flatten volume %(volume)s with " + "error: %(error)s.") % + {'volume': image_name, + 'error': e}) + LOG.exception(msg) + raise exception.VolumeBackendAPIException(data=msg) + def create_cloned_volume( self, volume: Volume, @@ -889,24 +916,12 @@ def create_cloned_volume( # volumes are always flattened. if (volume.use_quota and depth >= self.configuration.rbd_max_clone_depth): + LOG.info("maximum clone depth (%d) has been reached - " "flattening dest volume", self.configuration.rbd_max_clone_depth) - # Flatten destination volume - try: - with RBDVolumeProxy(self, dest_name, client=client, - ioctx=client.ioctx) as dest_volume: - LOG.debug("flattening dest volume %s", dest_name) - dest_volume.flatten() - except Exception as e: - msg = (_("Failed to flatten volume %(volume)s with " - "error: %(error)s.") % - {'volume': dest_name, - 'error': e}) - LOG.exception(msg) - src_volume.close() - raise exception.VolumeBackendAPIException(data=msg) + self._flatten_volume(dest_name, client) try: # remove temporary snap @@ -1167,11 +1182,22 @@ def create_volume(self, volume: Volume) -> dict[str, Any]: return volume_update + @utils.limit_operations + def _do_flatten(self, volume_name: str, pool: str) -> None: + LOG.debug('flattening %s/%s', pool, volume_name) + try: + with RBDVolumeProxy(self, volume_name, pool=pool) as vol: + vol.flatten() + LOG.debug('flattening of %s/%s has completed', + pool, volume_name) + except self.rbd.ImageNotFound: + LOG.debug('image %s not found during flatten', volume_name) + # do nothing + def _flatten(self, pool: str, volume_name: str) -> None: - LOG.debug('flattening %(pool)s/%(img)s', - dict(pool=pool, img=volume_name)) - with RBDVolumeProxy(self, volume_name, pool) as vol: - vol.flatten() + image = pool + '/' + volume_name + LOG.debug('Queueing %s for flattening', image) + self._do_flatten(volume_name, pool) def _get_stripe_unit(self, ioctx: 'rados.Ioctx', volume_name: str) -> int: """Return the correct stripe unit for a cloned volume. 
@@ -1309,23 +1335,6 @@ def _get_clone_info( return (None, None, None) - def _get_children_info(self, - volume: 'rbd.Image', - snap: Optional[str]) -> list[tuple]: - """List children for the given snapshot of a volume(image). - - Returns a list of (pool, image). - """ - - children_list = [] - - if snap: - volume.set_snap(snap) - children_list = volume.list_children() - volume.set_snap(None) - - return children_list - def _delete_clone_parent_refs(self, client: RADOSClient, parent_name: str, @@ -1368,96 +1377,161 @@ def _delete_clone_parent_refs(self, g_parent_snap = typing.cast(str, g_parent_snap) self._delete_clone_parent_refs(client, g_parent, g_parent_snap) - def delete_volume(self, volume: Volume) -> None: - """Deletes a logical volume.""" - volume_name = volume.name - with RADOSClient(self) as client: + def _flatten_children(self, + client_ioctx: 'rados.Ioctx', + volume_name: str, + snap_name: Optional[str] = None) -> None: + with RBDVolumeProxy(self, + volume_name, + ioctx=client_ioctx) as rbd_image: + if snap_name is not None: + rbd_image.set_snap(snap_name) + + children_list = rbd_image.list_children() + + for (pool, child_name) in children_list: + LOG.info('Image %(pool)s/%(image)s%(snap)s is dependent ' + 'on the image %(volume_name)s.', + {'pool': pool, + 'image': child_name, + 'volume_name': volume_name, + 'snap': '@' + snap_name if snap_name else ''}) try: - rbd_image = self.rbd.Image(client.ioctx, volume_name) - except self.rbd.ImageNotFound: - LOG.info("volume %s no longer exists in backend", - volume_name) - return + self._flatten(pool, child_name) + except Exception as e: + LOG.error(e) + raise + + def _move_volume_to_trash(self, + client_ioctx: 'rados.Ioctx', + volume_name: str, + delay: int) -> None: + # trash_move() will succeed in some situations when a regular + # remove() call will fail due to image dependencies + LOG.debug("moving volume %s to trash", volume_name) + try: + self.RBDProxy().trash_move(client_ioctx, + volume_name, + delay) + except self.rbd.ImageBusy: + msg = _('ImageBusy error raised while trashing RBD ' + 'volume.') + LOG.warning(msg) + raise exception.VolumeIsBusy(msg, volume_name=volume_name) + + def _try_remove_volume(self, client: 'rados', volume_name: str) -> bool: + # Try a couple of times to delete the volume, rather than + # stopping on the first error. + # In the event of simultaneous Cinder delete operations, + # this gives a window for other deletes of snapshots and images + # to complete, freeing dependencies which allow this remove to + # succeed. 
+ @utils.retry((self.rbd.ImageBusy, self.rbd.ImageHasSnapshots), + self.configuration.rados_connection_interval, + self.configuration.rados_connection_retries) + def _do_try_remove_volume(self, client, volume_name: str) -> bool: + try: + LOG.debug('Trying to remove image %s', volume_name) + self.RBDProxy().remove(client.ioctx, volume_name) + return True + except (self.rbd.ImageHasSnapshots, self.rbd.ImageBusy): + with excutils.save_and_reraise_exception(): + msg = _('deletion failed') + LOG.info(msg) + + return False - clone_snap = None - parent = None + return _do_try_remove_volume(self, client, volume_name) - # Ensure any backup snapshots are deleted - self._delete_backup_snaps(rbd_image) + @staticmethod + def _find_clone_snap(rbd_image: RBDVolumeProxy) -> Optional[str]: + snaps = rbd_image.list_snaps() + for snap in snaps: + if snap['name'].endswith('.clone_snap'): + LOG.debug("volume has clone snapshot(s)") + # We grab one of these and use it when fetching parent + # info in case the volume has been flattened. + clone_snap = snap['name'] + return clone_snap - # If the volume has non-clone snapshots this delete is expected to - # raise VolumeIsBusy so do so straight away. - try: - snaps = rbd_image.list_snaps() - for snap in snaps: - if snap['name'].endswith('.clone_snap'): - LOG.debug("volume has clone snapshot(s)") - # We grab one of these and use it when fetching parent - # info in case the volume has been flattened. - clone_snap = snap['name'] - break + return None + + def _delete_volume(self, volume: Volume, client: RADOSClient) -> None: + + clone_snap = None + parent = None + parent_snap = None + + try: + with RBDVolumeProxy(self, + volume.name, + ioctx=client.ioctx) as rbd_image: + # Ensure any backup snapshots are deleted + self._delete_backup_snaps(rbd_image) + + clone_snap = self._find_clone_snap(rbd_image) # Determine if this volume is itself a clone _pool, parent, parent_snap = self._get_clone_info(rbd_image, - volume_name, + volume.name, clone_snap) - finally: - rbd_image.close() + except self.rbd.ImageNotFound: + LOG.info("volume %s no longer exists in backend", volume.name) + return - @utils.retry(self.rbd.ImageBusy, - self.configuration.rados_connection_interval, - self.configuration.rados_connection_retries) - def _try_remove_volume(client: Any, volume_name: str) -> None: - if self.configuration.enable_deferred_deletion: - delay = self.configuration.deferred_deletion_delay - else: - try: - self.RBDProxy().remove(client.ioctx, volume_name) - return - except (self.rbd.ImageHasSnapshots, self.rbd.ImageBusy): - delay = 0 - LOG.debug("moving volume %s to trash", volume_name) - # When using the RBD v2 clone api, deleting a volume - # that has a snapshot in the trash space raises a - # busy exception. - # In order to solve this, call the trash operation - # which should succeed when the volume has - # dependencies. - self.RBDProxy().trash_move(client.ioctx, - volume_name, - delay) + if clone_snap is not None: + # If the volume has copy-on-write clones, keep it as a silent + # volume which will be deleted when its snapshots and clones + # are deleted. + # TODO: only do this if it actually can't be deleted? + new_name = "%s.deleted" % (volume.name) + self.RBDProxy().rename(client.ioctx, volume.name, new_name) + return - if clone_snap is None: - LOG.debug("deleting rbd volume %s", volume_name) - try: - _try_remove_volume(client, volume_name) - except self.rbd.ImageBusy: - msg = (_("ImageBusy error raised while deleting rbd " - "volume. 
This may have been caused by a " - "connection from a client that has crashed and, " - "if so, may be resolved by retrying the delete " - "after 30 seconds has elapsed.")) - LOG.warning(msg) - # Now raise this so that the volume stays available and the - # deletion can be retried. - raise exception.VolumeIsBusy(msg, volume_name=volume_name) - except self.rbd.ImageNotFound: - LOG.info("RBD volume %s not found, allowing delete " - "operation to proceed.", volume_name) - return - - # If it is a clone, walk back up the parent chain deleting - # references. - if parent: - LOG.debug("volume is a clone so cleaning references") - parent_snap = typing.cast(str, parent_snap) - self._delete_clone_parent_refs(client, parent, parent_snap) - else: - # If the volume has copy-on-write clones we will not be able to - # delete it. Instead we will keep it as a silent volume which - # will be deleted when it's snapshot and clones are deleted. - new_name = "%s.deleted" % (volume_name) - self.RBDProxy().rename(client.ioctx, volume_name, new_name) + LOG.debug("deleting RBD volume %s", volume.name) + + try: + self.RBDProxy().remove(client.ioctx, volume.name) + return # the fast path was successful + except (self.rbd.ImageHasSnapshots, self.rbd.ImageBusy): + self._flatten_children(client.ioctx, volume.name) + except self.rbd.ImageNotFound: + LOG.info("RBD volume %s not found, allowing delete " + "operation to proceed.", volume.name) + return + + try: + if self._try_remove_volume(client, volume.name): + return + except self.rbd.ImageHasSnapshots: + # perform trash instead, which can succeed when snapshots exist + pass + except self.rbd.ImageBusy: + msg = _('ImageBusy error raised while deleting RBD volume') + raise exception.VolumeIsBusy(msg, volume_name=volume.name) + + delay = 0 + if self.configuration.enable_deferred_deletion: + delay = self.configuration.deferred_deletion_delay + + # Since it failed to remove above, trash the volume here instead. + # This covers the scenario of an image unable to be deleted because + # a child snapshot of it has been trashed but not yet removed. + # That snapshot is not visible but is still in the dependency + # chain of RBD images. + self._move_volume_to_trash(client.ioctx, volume.name, delay) + + # If it is a clone, walk back up the parent chain deleting + # references. 
+        if parent:
+            LOG.debug("volume is a clone so cleaning references")
+            parent_snap = typing.cast(str, parent_snap)
+            self._delete_clone_parent_refs(client, parent, parent_snap)
+
+    def delete_volume(self, volume: Volume) -> None:
+        """Deletes an RBD volume."""
+        with RADOSClient(self) as client:
+            self._delete_volume(volume, client)
 
     def create_snapshot(self, snapshot: Snapshot) -> None:
         """Creates an rbd snapshot."""
@@ -1471,38 +1545,42 @@ def delete_snapshot(self, snapshot: Snapshot) -> None:
         volume_name = snapshot.volume_name
         snap_name = snapshot.name
 
-        try:
-            with RBDVolumeProxy(self, volume_name) as volume:
-                try:
+        @utils.retry(self.rbd.ImageBusy,
+                     self.configuration.rados_connection_interval,
+                     self.configuration.rados_connection_retries)
+        def do_unprotect_snap(self, volume_name, snap_name):
+            try:
+                with RBDVolumeProxy(self, volume_name) as volume:
                     volume.unprotect_snap(snap_name)
-                except self.rbd.InvalidArgument:
-                    LOG.info(
-                        "InvalidArgument: Unable to unprotect snapshot %s.",
-                        snap_name)
-                except self.rbd.ImageNotFound:
-                    LOG.info("Snapshot %s does not exist in backend.",
-                             snap_name)
-                    return
-                except self.rbd.ImageBusy:
-                    children_list = self._get_children_info(volume, snap_name)
-
-                    if children_list:
-                        for (pool, image) in children_list:
-                            LOG.info('Image %(pool)s/%(image)s is dependent '
-                                     'on the snapshot %(snap)s.',
-                                     {'pool': pool,
-                                      'image': image,
-                                      'snap': snap_name})
-
-                    raise exception.SnapshotIsBusy(snapshot_name=snap_name)
+            except self.rbd.InvalidArgument:
+                LOG.info(
+                    "InvalidArgument: Unable to unprotect snapshot %s.",
+                    snap_name)
+            except self.rbd.ImageNotFound as e:
+                LOG.info("Snapshot %s does not exist in backend.",
+                         snap_name)
+                raise e
+            except self.rbd.ImageBusy as e:
+                # flatten and then retry the operation
+                with RADOSClient(self) as client:
+                    self._flatten_children(client.ioctx,
+                                           volume_name,
+                                           snap_name)
+                raise e
 
-                try:
-                    volume.remove_snap(snap_name)
-                except self.rbd.ImageNotFound:
-                    LOG.info("Snapshot %s does not exist in backend.",
-                             snap_name)
+        try:
+            do_unprotect_snap(self, volume_name, snap_name)
+        except self.rbd.ImageBusy:
+            raise exception.SnapshotIsBusy(snapshot_name=snap_name)
+        except self.rbd.ImageNotFound:
+            return
+
+        try:
+            with RBDVolumeProxy(self, volume_name) as volume:
+                volume.remove_snap(snap_name)
         except self.rbd.ImageNotFound:
-            LOG.warning("Volume %s does not exist in backend.", volume_name)
+            LOG.info("Snapshot %s does not exist in backend.",
+                     snap_name)
 
     def snapshot_revert_use_temp_snapshot(self) -> bool:
         """Disable the use of a temporary snapshot on revert."""
diff --git a/doc/source/cli/cinder-manage.rst b/doc/source/cli/cinder-manage.rst
index a0b39cd5ca8..8b005bf86d7 100644
--- a/doc/source/cli/cinder-manage.rst
+++ b/doc/source/cli/cinder-manage.rst
@@ -178,6 +178,15 @@
 Delete a volume without first checking that the volume is available.
 
 Updates the host name of all volumes currently associated with a specified
 host.
 
+``cinder-manage volume update_service``
+
+When upgrading cinder, new service entries are created in the database as the
+existing cinder-volume host(s) are upgraded. In some cases, rows in the
+volumes table keep references to the old service, which can prevent the old
+services from being deleted when the database is purged. This command ensures
+that every volume on every cinder-volume host references the correct
+service.
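For illustration, the command's effect can be modeled as a single re-pointing
update per cinder-volume host. This is a hedged sketch, not Cinder's actual
implementation; the table and column names (``volumes``, ``service_uuid``,
``host``) simply follow the description above:

    # Hedged sketch only: models what "cinder-manage volume update_service"
    # does conceptually. Not Cinder's real code; table/column names follow
    # the prose above.
    import sqlite3


    def update_service_refs(conn: sqlite3.Connection,
                            host: str,
                            current_service_uuid: str) -> None:
        # Re-point every volume on the given cinder-volume host at the
        # current service record so stale service rows can be purged later.
        conn.execute(
            "UPDATE volumes SET service_uuid = ? WHERE host = ?",
            (current_service_uuid, host),
        )
        conn.commit()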
+
 Cinder Host
 ~~~~~~~~~~~
 
diff --git a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
index 5ec0dde23da..62bd5592424 100644
--- a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
@@ -126,6 +126,12 @@ If you use iSCSI:
 - ``Ports``
     Assign an IP address and a TCP port number to the port.
 
+.. note::
+
+   * Do not change the LDEV nickname of LDEVs created by the Hitachi block
+     storage driver. The nickname is referenced when deleting a volume or a
+     snapshot, to avoid the risk of data loss. See `bug #2072317`_ for details.
+
 Set up Hitachi storage volume driver
 ------------------------------------
@@ -184,3 +190,7 @@ Required options
 
 - ``hitachi_pools``
     Pool number(s) or pool name(s) of the DP pool.
+
+.. Document Hyperlinks
+.. _bug #2072317:
+   https://bugs.launchpad.net/cinder/+bug/2072317
diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml
new file mode 100644
index 00000000000..bc1dc04ea8f
--- /dev/null
+++ b/playbooks/enable-fips.yaml
@@ -0,0 +1,3 @@
+- hosts: all
+  roles:
+    - enable-fips
diff --git a/releasenotes/notes/bug-1823445-c47c25870a98335a.yaml b/releasenotes/notes/bug-1823445-c47c25870a98335a.yaml
new file mode 100644
index 00000000000..80215f9ea28
--- /dev/null
+++ b/releasenotes/notes/bug-1823445-c47c25870a98335a.yaml
@@ -0,0 +1,10 @@
+---
+fixes:
+  - |
+    Fixed the volume property `signature_verified` propagating to images created
+    from volumes. That property could later conflict with the same property being
+    added again when creating a new volume from such an image, preventing the
+    volume from being created successfully. This volume property is created
+    whenever a volume is created from an image to indicate that the image
+    signature was verified on creation, and was not intended to be propagated
+    further if a new image is created from such a volume.
diff --git a/releasenotes/notes/bug-2058596-3c676e7fdc642b3d.yaml b/releasenotes/notes/bug-2058596-3c676e7fdc642b3d.yaml
new file mode 100644
index 00000000000..15440ba4b4b
--- /dev/null
+++ b/releasenotes/notes/bug-2058596-3c676e7fdc642b3d.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    `Bug #2058596 `_: Fixed
+    broken ``backup_swift_service_auth=True``, which made the Swift backup
+    driver consistently fail during object data access.
diff --git a/releasenotes/notes/ceph-catch-more-failure-conditions-d2ec640f5ff8051c.yaml b/releasenotes/notes/ceph-catch-more-failure-conditions-d2ec640f5ff8051c.yaml
new file mode 100644
index 00000000000..4bb20a377e1
--- /dev/null
+++ b/releasenotes/notes/ceph-catch-more-failure-conditions-d2ec640f5ff8051c.yaml
@@ -0,0 +1,10 @@
+---
+fixes:
+  - |
+    `Bug #2031897 `_: Fixed
+    an issue with Ceph driver volume backups where failures of the first
+    process ("rbd export-diff") were not caught; only the return code
+    of the second process ("rbd import-diff") was recognized.
+
+    This change also preserves the stderr output that was previously
+    lost, in order to ease debugging.
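The fix described in this note hinges on capturing the producer's stderr and
return code instead of trusting only the consumer. A minimal, self-contained
sketch of that technique with generic commands, not the driver's actual
helper:

    # Minimal sketch of the technique described above: run "cmd1 | cmd2"
    # while preserving cmd1's stderr and return code, so a failure of
    # either process is detected. Generic example, not the Ceph driver's
    # code.
    import subprocess
    import tempfile


    def piped_execute(cmd1: list, cmd2: list) -> tuple:
        with tempfile.TemporaryFile() as err1:
            p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, stderr=err1)
            p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            p1.stdout.close()  # let cmd1 get SIGPIPE if cmd2 exits early
            out, err2 = p2.communicate()
            if p1.wait() != 0:
                # Surface the producer's failure, not just the consumer's.
                err1.seek(0)
                raise RuntimeError("first command failed: %s" % err1.read())
            return p2.returncode, out, err2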
diff --git a/releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml b/releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml
new file mode 100644
index 00000000000..7bc3dc419b8
--- /dev/null
+++ b/releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Hitachi driver `bug #2072317
+    `_: Fixed potential
+    data loss due to a network issue during volume deletion.
diff --git a/releasenotes/notes/hitachi-vsp-fix-except-in-del-vol-ca8b4c5d40d69531.yaml b/releasenotes/notes/hitachi-vsp-fix-except-in-del-vol-ca8b4c5d40d69531.yaml
new file mode 100644
index 00000000000..d8a0dcbc4ba
--- /dev/null
+++ b/releasenotes/notes/hitachi-vsp-fix-except-in-del-vol-ca8b4c5d40d69531.yaml
@@ -0,0 +1,6 @@
+fixes:
+  - |
+    Hitachi driver `bug #2024418
+    `_: Fixed to raise the
+    correct exception when a volume is busy during a delete volume
+    operation.
\ No newline at end of file
diff --git a/releasenotes/notes/hitachi-vsp-fix-to-use-correct-HGname-78c3c47dcf984ddf.yaml b/releasenotes/notes/hitachi-vsp-fix-to-use-correct-HGname-78c3c47dcf984ddf.yaml
new file mode 100644
index 00000000000..f865dc560a9
--- /dev/null
+++ b/releasenotes/notes/hitachi-vsp-fix-to-use-correct-HGname-78c3c47dcf984ddf.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    HPE XP and NEC V driver `bug #2012515
+    `_: Fixed to use the
+    correct host group name.
diff --git a/releasenotes/notes/hitachi-vsp-fix-to-use-correct-pool-in-GAD-9413a343dcc98029.yaml b/releasenotes/notes/hitachi-vsp-fix-to-use-correct-pool-in-GAD-9413a343dcc98029.yaml
new file mode 100644
index 00000000000..ecfc344747f
--- /dev/null
+++ b/releasenotes/notes/hitachi-vsp-fix-to-use-correct-pool-in-GAD-9413a343dcc98029.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Hitachi driver `bug #2011810
+    `_: Fixed to use the
+    correct pool number for the secondary storage in a GAD environment.
diff --git a/releasenotes/notes/hitachi_fix-ldevnickname-0a0756449e7448d9.yaml b/releasenotes/notes/hitachi_fix-ldevnickname-0a0756449e7448d9.yaml
new file mode 100644
index 00000000000..2f16814dfee
--- /dev/null
+++ b/releasenotes/notes/hitachi_fix-ldevnickname-0a0756449e7448d9.yaml
@@ -0,0 +1,6 @@
+fixes:
+  - |
+    Hitachi driver `bug #2071697
+    `_: Fixed to set the
+    correct object ID as the LDEV nickname when running a host-assisted
+    migration with the ``retype`` or ``migration`` commands.
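The nickname-based safety check behind these Hitachi fixes can be summarized
as follows. The helper names are illustrative, not the driver's real API, but
the dash-stripped UUID convention appears in the driver changes above:

    # Hedged sketch of the convention the Hitachi fixes rely on: an LDEV
    # is labeled with its owning object's UUID, dashes stripped, and
    # deletion is skipped when the label no longer matches. Helper names
    # are illustrative, not the driver's real API.
    def object_id_to_label(object_id: str) -> str:
        return object_id.replace("-", "")


    def safe_to_delete(volume_id: str, ldev_label: str) -> bool:
        # Delete only if the LDEV is still labeled with this volume's ID.
        return ldev_label == object_id_to_label(volume_id)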
diff --git a/releasenotes/notes/hitachi_fix-testscripts-e4490f9f99994fb8.yaml b/releasenotes/notes/hitachi_fix-testscripts-e4490f9f99994fb8.yaml
new file mode 100644
index 00000000000..f6354693d9b
--- /dev/null
+++ b/releasenotes/notes/hitachi_fix-testscripts-e4490f9f99994fb8.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Hitachi driver `bug #2063317
+    `_: Fixed test scripts to
+    avoid failures from unexpected pseudo REST API server responses.
diff --git a/releasenotes/notes/hpe-3par-login-getWsApiVersion-0252d655844ae054.yaml b/releasenotes/notes/hpe-3par-login-getWsApiVersion-0252d655844ae054.yaml
new file mode 100644
index 00000000000..66e78451c29
--- /dev/null
+++ b/releasenotes/notes/hpe-3par-login-getWsApiVersion-0252d655844ae054.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    HPE 3PAR driver `Bug #2068795 `_:
+    Fixed the driver to log in before invoking ``getWsApiVersion``.
+
diff --git a/releasenotes/notes/image-metadata-size-increase-323812970dc0e513.yaml b/releasenotes/notes/image-metadata-size-increase-323812970dc0e513.yaml
new file mode 100644
index 00000000000..14be1817d08
--- /dev/null
+++ b/releasenotes/notes/image-metadata-size-increase-323812970dc0e513.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+  - |
+    `Bug #1988942 `_: Increased the
+    size of volume image metadata values accepted by the Block Storage API.
+    Volume image metadata values were limited to 255 characters but Glance
+    allows up to 65535 bytes. This change does not affect the database
+    tables, which already allow up to 65535 bytes for image metadata values.
diff --git a/releasenotes/notes/pure-report-addressing-91963e29fbed32a4.yaml b/releasenotes/notes/pure-report-addressing-91963e29fbed32a4.yaml
new file mode 100644
index 00000000000..1080695760c
--- /dev/null
+++ b/releasenotes/notes/pure-report-addressing-91963e29fbed32a4.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Pure iSCSI & FC driver `bug #2006960
+    `_: Fixed attaching LUNs
+    greater than 255. The driver now leverages new os-brick functionality
+    to specify the LUN addressing mode.
diff --git a/releasenotes/notes/rbd-flatten-child-volumes-4cb0b7fcf3a1df5e.yaml b/releasenotes/notes/rbd-flatten-child-volumes-4cb0b7fcf3a1df5e.yaml
new file mode 100644
index 00000000000..a6285476571
--- /dev/null
+++ b/releasenotes/notes/rbd-flatten-child-volumes-4cb0b7fcf3a1df5e.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+  - |
+    Cinder now uses the RBD trash functionality to handle some volume deletions.
+    Therefore, deployments must either a) enable scheduled RBD trash purging on
+    the RBD backend or b) enable the Cinder RBD driver's ``enable_deferred_deletion``
+    option to have Cinder purge the RBD trash.
+    This adds the new configuration option ``rbd_concurrent_flatten_operations``,
+    which limits how many RBD flatten operations the driver runs simultaneously.
+    This can be used to prevent flatten operations from consuming too much I/O
+    capacity on the Ceph cluster. It defaults to 3.
+fixes:
+  - |
+    `Bug #1969643 `_:
+    The RBD driver can now delete a volume that has other volumes cloned
+    from it (or from its snapshots) in cases where deletion would previously
+    fail. This uses the RBD trash functionality.
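To illustrate what ``rbd_concurrent_flatten_operations`` bounds, here is a
hedged sketch using a plain semaphore. The driver itself builds its limiter
via ``utils.semaphore_factory`` (see the driver change above); ``threading``
is used here only to keep the example self-contained:

    # Hedged sketch of bounding concurrent flatten operations with a
    # semaphore, as rbd_concurrent_flatten_operations does for the
    # driver. threading.Semaphore stands in for the driver's factory.
    import threading

    FLATTEN_LIMIT = threading.Semaphore(3)  # mirrors the default of 3


    def bounded_flatten(image) -> None:
        # Blocks while three other flatten operations are already running.
        with FLATTEN_LIMIT:
            image.flatten()  # rbd.Image.flatten() copies parent data down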
diff --git a/releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml b/releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml
new file mode 100644
index 00000000000..495598e9fb5
--- /dev/null
+++ b/releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml
@@ -0,0 +1,15 @@
+---
+features:
+  - |
+    Added a new cinder-manage command to handle the situation where database
+    purges would not complete due to the volumes table holding references to
+    deleted services. The new command ensures that all volumes reference only
+    the correct service_uuid, which allows old service records to be purged
+    from the database.
+
+    Command: ``cinder-manage volume update_service``
+  - |
+    When Cinder creates a new cinder-volume service, it now also immediately
+    updates the service_uuid for all volumes associated with that
+    cinder-volume host. Previously, stale service references could prevent
+    the database purge operation from completing successfully.
diff --git a/test-requirements.txt b/test-requirements.txt
index 10c8d6acb8a..c9539a2a0e6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -4,7 +4,7 @@
 # Install bounded pep8/pyflakes first, then let flake8 install
 hacking>=5.0.0,<5.1.0 # Apache-2.0
 
-flake8-import-order # LGPLv3
+flake8-import-order<0.19.0 # LGPLv3
 flake8-logging-format>=0.6.0 # Apache-2.0
 
 stestr>=3.2.1 # Apache-2.0
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
index 5b986ced361..fced9be5e0f 100755
--- a/tools/test-setup.sh
+++ b/tools/test-setup.sh
@@ -15,6 +15,47 @@ DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave}
 DB_USER=openstack_citest
 DB_PW=openstack_citest
 
+function is_rhel7 {
+    [ -f /usr/bin/yum ] && \
+        cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \
+        cat /etc/*release | grep -q 'release 7'
+}
+
+function is_rhel8 {
+    [ -f /usr/bin/dnf ] && \
+        cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \
+        cat /etc/*release | grep -q 'release 8'
+}
+
+function is_rhel9 {
+    [ -f /usr/bin/dnf ] && \
+        cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \
+        cat /etc/*release | grep -q 'release 9'
+}
+
+function set_conf_line { # file regex value
+    sudo sh -c "grep -q -e '$2' $1 && \
+        sed -i 's|$2|$3|g' $1 || \
+        echo '$3' >> $1"
+}
+
+if is_rhel7 || is_rhel8 || is_rhel9; then
+    # mysql needs to be started on centos/rhel
+    sudo systemctl restart mariadb.service
+
+    # postgres setup for centos
+    sudo postgresql-setup --initdb
+    PG_CONF=/var/lib/pgsql/data/postgresql.conf
+    set_conf_line $PG_CONF '^password_encryption =.*' 'password_encryption = scram-sha-256'
+
+    PG_HBA=/var/lib/pgsql/data/pg_hba.conf
+    set_conf_line $PG_HBA '^local[ \t]*all[ \t]*all.*' 'local all all peer'
+    set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*127.0.0.1\/32.*' 'host all all 127.0.0.1/32 scram-sha-256'
+    set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*::1\/128.*' 'host all all ::1/128 scram-sha-256'
+
+    sudo systemctl restart postgresql.service
+fi
+
 sudo -H mysqladmin -u root password $DB_ROOT_PW
 
 # It's best practice to remove anonymous users from the database. If