Skip to content

Commit f12f5ff

Browse files
committed
Don't allow spares to force faulted
The 'zpool offline' man page says that you cannot use 'zpool offline' on spares. However, testing found that you could in fact force-fault ('zpool offline -f') spares. Add both kernel-side and libzfs-side checks for this. The libzfs checks are slightly better, since they print a helpful error message. Signed-off-by: Tony Hutter <hutter2@llnl.gov>
1 parent 75659a4 commit f12f5ff

File tree

7 files changed

+98
-7
lines changed

7 files changed

+98
-7
lines changed

lib/libzfs/libzfs_pool.c

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3563,10 +3563,32 @@ zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
35633563
zfs_cmd_t zc = {"\0"};
35643564
char errbuf[ERRBUFLEN];
35653565
libzfs_handle_t *hdl = zhp->zpool_hdl;
3566+
nvlist_t *vdev_nv;
3567+
boolean_t avail_spare, l2cache;
3568+
char *vdev_name;
3569+
char guid_str[21]; /* 64-bit num + '\0' */
35663570

35673571
(void) snprintf(errbuf, sizeof (errbuf),
35683572
dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
35693573

3574+
snprintf(guid_str, sizeof (guid_str), "%llu", (u_longlong_t)guid);
3575+
if ((vdev_nv = zpool_find_vdev(zhp, guid_str, &avail_spare,
3576+
&l2cache, NULL)) == NULL)
3577+
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3578+
3579+
vdev_name = zpool_vdev_name(hdl, zhp, vdev_nv, 0);
3580+
if (vdev_name != NULL) {
3581+
/*
3582+
* We have the actual vdev name, so use that instead of the GUID
3583+
* in any error messages.
3584+
*/
3585+
(void) snprintf(errbuf, sizeof (errbuf),
3586+
dgettext(TEXT_DOMAIN, "cannot fault %s"), vdev_name);
3587+
}
3588+
3589+
if (avail_spare)
3590+
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3591+
35703592
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
35713593
zc.zc_guid = guid;
35723594
zc.zc_cookie = VDEV_STATE_FAULTED;

module/zfs/vdev.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4309,6 +4309,9 @@ vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
43094309
if (!vd->vdev_ops->vdev_op_leaf)
43104310
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
43114311

4312+
if (vd->vdev_isspare)
4313+
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4314+
43124315
tvd = vd->vdev_top;
43134316

43144317
/*
@@ -4556,8 +4559,8 @@ vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
45564559
if (!vd->vdev_ops->vdev_op_leaf)
45574560
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
45584561

4559-
if (vd->vdev_ops == &vdev_draid_spare_ops)
4560-
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
4562+
if (vd->vdev_isspare)
4563+
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
45614564

45624565
tvd = vd->vdev_top;
45634566
mg = tvd->vdev_mg;

tests/runfiles/common.run

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -524,7 +524,7 @@ tags = ['functional', 'cli_root', 'zpool_initialize']
524524

525525
[tests/functional/cli_root/zpool_offline]
526526
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg',
527-
'zpool_offline_003_pos']
527+
'zpool_offline_003_pos', 'zpool_offline_spare']
528528
tags = ['functional', 'cli_root', 'zpool_offline']
529529

530530
[tests/functional/cli_root/zpool_online]

tests/runfiles/sanity.run

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -323,7 +323,8 @@ pre =
323323
tags = ['functional', 'cli_root', 'zpool_initialize']
324324

325325
[tests/functional/cli_root/zpool_offline]
326-
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg']
326+
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg',
327+
'zpool_offline_spare']
327328
tags = ['functional', 'cli_root', 'zpool_offline']
328329

329330
[tests/functional/cli_root/zpool_online]

tests/zfs-tests/tests/Makefile.am

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1216,6 +1216,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
12161216
functional/cli_root/zpool_offline/zpool_offline_001_pos.ksh \
12171217
functional/cli_root/zpool_offline/zpool_offline_002_neg.ksh \
12181218
functional/cli_root/zpool_offline/zpool_offline_003_pos.ksh \
1219+
functional/cli_root/zpool_offline/zpool_offline_spare.ksh \
12191220
functional/cli_root/zpool_online/cleanup.ksh \
12201221
functional/cli_root/zpool_online/setup.ksh \
12211222
functional/cli_root/zpool_online/zpool_online_001_pos.ksh \
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
#!/bin/ksh -p
# SPDX-License-Identifier: CDDL-1.0
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

# Copyright 2026 by Lawrence Livermore National Security, LLC.

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# Verify we can't offline or force fault spares
#
# STRATEGY:
# 1. Create pool with traditional spare
# 2. Verify we can't offline/fault the traditional spare
# 3. Create draid pool with draid spare
# 4. Verify we can't offline/fault draid spare

TESTPOOL2=testpool2

# Tear down the test pool and its backing file vdevs on exit.
function cleanup
{
	destroy_pool $TESTPOOL2
	log_must rm -f $TESTDIR/file-vdev-{1..3}
}

log_onexit cleanup
verify_runnable "global"

log_assert "Verify zpool offline does not work on spares"

# Verify any old file vdevs are gone.
# Note: ksh parses bash's '&>' as "background the command, then redirect",
# so use the portable '> file 2>&1' form instead.
log_mustnot ls $TESTDIR/file-vdev-* > /dev/null 2>&1

log_must truncate -s 100M $TESTDIR/file-vdev-{1..3}

# Traditional hot spare: neither a normal offline nor a forced fault
# (-f) may succeed on the spare vdev.
log_must zpool create $TESTPOOL2 $TESTDIR/file-vdev-1 spare $TESTDIR/file-vdev-2
log_mustnot zpool offline $TESTPOOL2 $TESTDIR/file-vdev-2
log_mustnot zpool offline -f $TESTPOOL2 $TESTDIR/file-vdev-2
destroy_pool $TESTPOOL2

# dRAID distributed spare: same checks against the draid spare vdev.
log_must zpool create $TESTPOOL2 draid1:1d:1s:3c $TESTDIR/file-vdev-{1..3}
log_mustnot zpool offline $TESTPOOL2 draid1-0-0
log_mustnot zpool offline -f $TESTPOOL2 draid1-0-0

log_pass "'zpool offline' does not work on spares as expected"

tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -166,9 +166,8 @@ do
166166

167167
mntpnt=$(get_prop mountpoint /$TESTPOOL)
168168

169-
# 2. Fault the spare device making it unavailable
170-
log_must zpool offline -f $TESTPOOL $sparedev
171-
log_must wait_hotspare_state $TESTPOOL $sparedev "FAULTED"
169+
# 2. Remove the spare device making it unavailable
170+
log_must zpool remove $TESTPOOL $sparedev
172171

173172
# 3. Simulate physical removal of one device
174173
remove_disk $removedev

0 commit comments

Comments
 (0)