diff --git a/.go-version b/.go-version index 2f4320f67fe0a..7a429d68a36af 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.24.4 +1.24.6 diff --git a/CHANGELOG/CHANGELOG-1.33.md b/CHANGELOG/CHANGELOG-1.33.md index de03744792802..9f3ee474305ff 100644 --- a/CHANGELOG/CHANGELOG-1.33.md +++ b/CHANGELOG/CHANGELOG-1.33.md @@ -1,170 +1,412 @@ -- [v1.33.2](#v1332) - - [Downloads for v1.33.2](#downloads-for-v1332) +- [v1.33.4](#v1334) + - [Downloads for v1.33.4](#downloads-for-v1334) - [Source Code](#source-code) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - [Container Images](#container-images) - - [Changelog since v1.33.1](#changelog-since-v1331) + - [Changelog since v1.33.3](#changelog-since-v1333) - [Important Security Information](#important-security-information) - - [CVE-2025-4563: Nodes can bypass dynamic resource allocation authorization checks](#cve-2025-4563-nodes-can-bypass-dynamic-resource-allocation-authorization-checks) + - [CVE-2025-5187: Nodes can delete themselves by adding an OwnerReference](#cve-2025-5187-nodes-can-delete-themselves-by-adding-an-ownerreference) - [Changes by Kind](#changes-by-kind) + - [API Change](#api-change) - [Feature](#feature) - [Bug or Regression](#bug-or-regression) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies) - [Added](#added) - [Changed](#changed) - [Removed](#removed) -- [v1.33.1](#v1331) - - [Downloads for v1.33.1](#downloads-for-v1331) +- [v1.33.3](#v1333) + - [Downloads for v1.33.3](#downloads-for-v1333) - [Source Code](#source-code-1) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - [Container Images](#container-images-1) - - [Changelog since v1.33.0](#changelog-since-v1330) + - [Changelog since v1.33.2](#changelog-since-v1332) - [Changes by Kind](#changes-by-kind-1) - [Bug or Regression](#bug-or-regression-1) + - [Other (Cleanup or 
Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies-1) - [Added](#added-1) - [Changed](#changed-1) - [Removed](#removed-1) -- [v1.33.0](#v1330) - - [Downloads for v1.33.0](#downloads-for-v1330) +- [v1.33.2](#v1332) + - [Downloads for v1.33.2](#downloads-for-v1332) - [Source Code](#source-code-2) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - [Container Images](#container-images-2) - - [Changelog since v1.32.0](#changelog-since-v1320) - - [Urgent Upgrade Notes](#urgent-upgrade-notes) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) + - [Changelog since v1.33.1](#changelog-since-v1331) + - [Important Security Information](#important-security-information-1) + - [CVE-2025-4563: Nodes can bypass dynamic resource allocation authorization checks](#cve-2025-4563-nodes-can-bypass-dynamic-resource-allocation-authorization-checks) - [Changes by Kind](#changes-by-kind-2) - - [Deprecation](#deprecation) - - [API Change](#api-change) - [Feature](#feature-1) - - [Documentation](#documentation) - [Bug or Regression](#bug-or-regression-2) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-2) - [Added](#added-2) - [Changed](#changed-2) - [Removed](#removed-2) -- [v1.33.0-rc.1](#v1330-rc1) - - [Downloads for v1.33.0-rc.1](#downloads-for-v1330-rc1) +- [v1.33.1](#v1331) + - [Downloads for v1.33.1](#downloads-for-v1331) - [Source Code](#source-code-3) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - [Container Images](#container-images-3) - - [Changelog since v1.33.0-rc.0](#changelog-since-v1330-rc0) + - [Changelog since v1.33.0](#changelog-since-v1330) - [Changes by Kind](#changes-by-kind-3) - [Bug or Regression](#bug-or-regression-3) - [Dependencies](#dependencies-3) - [Added](#added-3) - [Changed](#changed-3) - [Removed](#removed-3) -- 
[v1.33.0-rc.0](#v1330-rc0) - - [Downloads for v1.33.0-rc.0](#downloads-for-v1330-rc0) +- [v1.33.0](#v1330) + - [Downloads for v1.33.0](#downloads-for-v1330) - [Source Code](#source-code-4) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - [Container Images](#container-images-4) - - [Changelog since v1.33.0-beta.0](#changelog-since-v1330-beta0) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) + - [Changelog since v1.32.0](#changelog-since-v1320) + - [Urgent Upgrade Notes](#urgent-upgrade-notes) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) - [Changes by Kind](#changes-by-kind-4) - - [Deprecation](#deprecation-1) + - [Deprecation](#deprecation) - [API Change](#api-change-1) - [Feature](#feature-2) + - [Documentation](#documentation) - [Bug or Regression](#bug-or-regression-4) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) - [Dependencies](#dependencies-4) - [Added](#added-4) - [Changed](#changed-4) - [Removed](#removed-4) -- [v1.33.0-beta.0](#v1330-beta0) - - [Downloads for v1.33.0-beta.0](#downloads-for-v1330-beta0) +- [v1.33.0-rc.1](#v1330-rc1) + - [Downloads for v1.33.0-rc.1](#downloads-for-v1330-rc1) - [Source Code](#source-code-5) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - [Container Images](#container-images-5) - - [Changelog since v1.33.0-alpha.3](#changelog-since-v1330-alpha3) + - [Changelog since v1.33.0-rc.0](#changelog-since-v1330-rc0) - [Changes by Kind](#changes-by-kind-5) - - [API Change](#api-change-2) - - [Feature](#feature-3) - [Bug or Regression](#bug-or-regression-5) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-5) - [Added](#added-5) - [Changed](#changed-5) - [Removed](#removed-5) -- 
[v1.33.0-alpha.3](#v1330-alpha3) - - [Downloads for v1.33.0-alpha.3](#downloads-for-v1330-alpha3) +- [v1.33.0-rc.0](#v1330-rc0) + - [Downloads for v1.33.0-rc.0](#downloads-for-v1330-rc0) - [Source Code](#source-code-6) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - [Container Images](#container-images-6) - - [Changelog since v1.33.0-alpha.2](#changelog-since-v1330-alpha2) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-2) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-2) + - [Changelog since v1.33.0-beta.0](#changelog-since-v1330-beta0) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) - [Changes by Kind](#changes-by-kind-6) - - [Deprecation](#deprecation-2) - - [API Change](#api-change-3) - - [Feature](#feature-4) + - [Deprecation](#deprecation-1) + - [API Change](#api-change-2) + - [Feature](#feature-3) - [Bug or Regression](#bug-or-regression-6) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-6) - [Added](#added-6) - [Changed](#changed-6) - [Removed](#removed-6) -- [v1.33.0-alpha.2](#v1330-alpha2) - - [Downloads for v1.33.0-alpha.2](#downloads-for-v1330-alpha2) +- [v1.33.0-beta.0](#v1330-beta0) + - [Downloads for v1.33.0-beta.0](#downloads-for-v1330-beta0) - [Source Code](#source-code-7) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - [Container Images](#container-images-7) - - [Changelog since v1.33.0-alpha.1](#changelog-since-v1330-alpha1) + - [Changelog since v1.33.0-alpha.3](#changelog-since-v1330-alpha3) - [Changes by Kind](#changes-by-kind-7) - - [Deprecation](#deprecation-3) - - [API Change](#api-change-4) - - [Feature](#feature-5) + - [API 
Change](#api-change-3) + - [Feature](#feature-4) - [Bug or Regression](#bug-or-regression-7) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-5) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) - [Dependencies](#dependencies-7) - [Added](#added-7) - [Changed](#changed-7) - [Removed](#removed-7) -- [v1.33.0-alpha.1](#v1330-alpha1) - - [Downloads for v1.33.0-alpha.1](#downloads-for-v1330-alpha1) +- [v1.33.0-alpha.3](#v1330-alpha3) + - [Downloads for v1.33.0-alpha.3](#downloads-for-v1330-alpha3) - [Source Code](#source-code-8) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - [Container Images](#container-images-8) - - [Changelog since v1.32.0](#changelog-since-v1320-1) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-3) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-3) + - [Changelog since v1.33.0-alpha.2](#changelog-since-v1330-alpha2) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-2) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-2) - [Changes by Kind](#changes-by-kind-8) - - [API Change](#api-change-5) - - [Feature](#feature-6) - - [Documentation](#documentation-1) + - [Deprecation](#deprecation-2) + - [API Change](#api-change-4) + - [Feature](#feature-5) - [Bug or Regression](#bug-or-regression-8) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-6) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-5) - [Dependencies](#dependencies-8) - [Added](#added-8) - [Changed](#changed-8) - [Removed](#removed-8) +- [v1.33.0-alpha.2](#v1330-alpha2) + - [Downloads for v1.33.0-alpha.2](#downloads-for-v1330-alpha2) + - [Source Code](#source-code-9) + - [Client Binaries](#client-binaries-9) + - [Server Binaries](#server-binaries-9) + - [Node Binaries](#node-binaries-9) + - [Container Images](#container-images-9) + - [Changelog since 
v1.33.0-alpha.1](#changelog-since-v1330-alpha1) + - [Changes by Kind](#changes-by-kind-9) + - [Deprecation](#deprecation-3) + - [API Change](#api-change-5) + - [Feature](#feature-6) + - [Bug or Regression](#bug-or-regression-9) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-6) + - [Dependencies](#dependencies-9) + - [Added](#added-9) + - [Changed](#changed-9) + - [Removed](#removed-9) +- [v1.33.0-alpha.1](#v1330-alpha1) + - [Downloads for v1.33.0-alpha.1](#downloads-for-v1330-alpha1) + - [Source Code](#source-code-10) + - [Client Binaries](#client-binaries-10) + - [Server Binaries](#server-binaries-10) + - [Node Binaries](#node-binaries-10) + - [Container Images](#container-images-10) + - [Changelog since v1.32.0](#changelog-since-v1320-1) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-3) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-3) + - [Changes by Kind](#changes-by-kind-10) + - [API Change](#api-change-6) + - [Feature](#feature-7) + - [Documentation](#documentation-1) + - [Bug or Regression](#bug-or-regression-10) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-7) + - [Dependencies](#dependencies-10) + - [Added](#added-10) + - [Changed](#changed-10) + - [Removed](#removed-10) +# v1.33.4 + + +## Downloads for v1.33.4 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes.tar.gz) | edefd29f93082d860e974a25c9d55cf1a43d4d7b02b7dd8836f3d6c904fe9ba33e8947e8b30c6225fae5b53189c3741d86e5e7fca8520ba82373a112b55b09d7 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-src.tar.gz) | caafec0f069761c8996bebba303841e50c0b76a519342cb8905011237075f9a6498736496c306ce3beceae051b784f466cc58543a84d633e1c5b5ce07d8b1c55 + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-darwin-amd64.tar.gz) | 
ecd902e004a072eaa92d60ce81635519aaa93553313c808c0d27d15a07a1164b0cd586e271fe979e731e167daaed8b8816010bd018cb0ff16dcde9a46dcf0736 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-darwin-arm64.tar.gz) | 967943fbe8fe87ad4a0715dd55adcd5ca040db8728e5dbcd51c80753845efadb8732bde37c947c2b2d881f758801118909170d2811ceed1ef932c5c6d6611b42 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-linux-386.tar.gz) | 0ef69b0736b9d6b81f1d8935c0a8a836368f9ed3746f7f5bf04c51f1f5e6da526da223b145b33e315104303adbba01fcb3b49e515594fa48b85d1172ab6c0fab +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-linux-amd64.tar.gz) | e628239516ed6a3d07d47b451b7f42199fb5dcfb4d416f7b519235fd454e0fca3d0c273cc9c709f653a935a32c1f9fbd0a4be88f4c59d0ddcd674be2c289c8a5 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-linux-arm.tar.gz) | eb349a54d2013ae535fd60a0c32b0a932f176c9203541fba88e9eecbb794a2701479d09389e04950f5ed27b8a48383072b658cdfe7bddb3f0b60c2657a93d90f +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-linux-arm64.tar.gz) | 6b138fd30c198a55e63202dda76f1c9fa04d6a428ff15de9f10a85031ee70c7fd7ed7dd18d24a111c513ec3b492a876a74968048b432ee07ea281798e17653da +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-linux-ppc64le.tar.gz) | 1e39f514ccfe007d96f66330b508f39fac157f5278c44b9017e86744fd4cc5f9f1b0e6eecbcb83ed0c0a4b4f3bc49c8a567c8058db7e8e94cc2071f926e0a2de +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-linux-s390x.tar.gz) | 9d889bbd825cb31b062800b5f450f8ca0aaf799a0d922af1fc026163a7140aeb1a792f5a918e86b40c82dc5cab67b034bc7e4dfc9133a3f4537ded2c46eff9c6 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-windows-386.tar.gz) | 
8fb67c88aeacbd92ddc7c2d53d9b078fc0ccd4c304159008e2acc073a5fe7ed6d5fdaa6292ae08f027e29ecc1dc3ce3c7fd71f24f59e1bec182110d002ea0d7b +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-windows-amd64.tar.gz) | 6b0cd0b690dcd606adac34c3a6fc1b6733f6331354ff548b600e31154ee097000ecd045addc957c27b6ecea98d9676d2ae0edde62d5f0d205bd3fb0132ed008a +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-client-windows-arm64.tar.gz) | b5055310eb2335c371ea1aa97c371745eace51dca5b87773e48bc00818453f29915685cd0bf17917f4888ced0e47c12ec2abd6bd23059d257034ba1fc00e2983 + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-server-linux-amd64.tar.gz) | dc91e00247992e242bd88f7d694f1d090b84211c8440674d14ff37694dfd8241faad4e4f4762e16f9144e337863b4ac388d27cc7597d3b327640f82a4c949823 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-server-linux-arm64.tar.gz) | b8c277f1774f4f15fede8d5489707f71f9ec7ebbdc1f5ebfe08b64f067aab9cf4270b33192eb85d571d061db46b4ec33b8f0f4ae961d828f85866fe450902548 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-server-linux-ppc64le.tar.gz) | 121d2b65a19737ecadc21906eacc7ff9ee14916e91c42d108fba3c8cf421d36eb39a83d9d7854c03d9a30f310525756973cb56f8f1a62c448abe74c99ace5d62 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-server-linux-s390x.tar.gz) | ba53dc9af98de96f03ba5cd4d17299352a09696fd4b3bdd87ee83c8ad33c919aa015d6972f67f12047253b0f2be6f05a6d7ed6025a2bee064caba87224a3bffa + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-node-linux-amd64.tar.gz) | f8735d12b7f95bd400834d0d90e76e08f47db117b963073566766e1c530810a8f032b6c771b243f0c538f34a30fe9da5f3ba77e6396f0b00a254a37b1c3ea6c1 
+[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-node-linux-arm64.tar.gz) | c8c3286545cf51c64c7c8bf9b92b80a8da142540fc5e0392152756feb99a07aca38eab96aa2f0315dda28fd912b403917012db36ea5e93799a5aff1912066f26 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-node-linux-ppc64le.tar.gz) | 606618ccfb5d7b394ee71081458b971d759423e32705c70b7dad3b85581d39acd84791f938fda210f5d88f159eb08612f6db722617bb16d1ab5d97d0eb7b33c4 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-node-linux-s390x.tar.gz) | 3fa0175de48ab81142b4fc68d715a53d5d18cd23d5966b23e80d1b01249e91ccf44d59d06ce6daf65dd24d547938afcb96d222bb2227d1386c7c346b43dbc3f3 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.33.4/kubernetes-node-windows-amd64.tar.gz) | 205c1f6887ad59b453597a74bc4b6e3e7137e92d2f711c092de1f111d8e6c9137ac01038c84b9a53ebc8f93af7f4453dc557c6fe7610658174eaab06bba0df20 + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. 
+ +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.33.4](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/conformance) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/conformance-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/conformance-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/conformance-ppc64le), [s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.33.4](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-apiserver) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-apiserver-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.33.4](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-controller-manager) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-controller-manager-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-controller-manager-ppc64le), 
[s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.33.4](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-proxy) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-proxy-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.33.4](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-scheduler) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-scheduler-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-scheduler-s390x) +[registry.k8s.io/kubectl:v1.33.4](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kubectl) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kubectl-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kubectl-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kubectl-ppc64le), 
[s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kubectl-s390x) + +## Changelog since v1.33.3 + +## Important Security Information + +This release contains changes that address the following vulnerabilities: + +### CVE-2025-5187: Nodes can delete themselves by adding an OwnerReference + +A vulnerability exists in the NodeRestriction admission controller where node users can delete their corresponding node object by patching themselves with an OwnerReference to a cluster-scoped resource. If the OwnerReference resource does not exist or is subsequently deleted, the given node object will be deleted via garbage collection. By default, node users are authorized for create and patch requests but not delete requests against their node object. Since the NodeRestriction admission controller does not prevent patching OwnerReferences, a compromised node could leverage this vulnerability to delete and then recreate its node object with modified taints or labels. + + +**Affected Versions**: + - kube-apiserver v1.31.0 - v1.31.11 + - kube-apiserver v1.32.0 - v1.32.7 + - kube-apiserver v1.33.0 - v1.33.3 + +**Fixed Versions**: + - kube-apiserver v1.31.12 + - kube-apiserver v1.32.8 + - kube-apiserver v1.33.4 + +This vulnerability was reported by Paul Viossat. + + +**CVSS Rating:** Medium (6.7) [CVSS:3.1/AV:N/AC:L/PR:H/UI:N/S:U/C:H/I:H/A:L](https://www.first.org/cvss/calculator/3.1#CVSS:3.1/AV:N/AC:L/PR:H/UI:N/S:U/C:H/I:H/A:L) + +## Changes by Kind + +### API Change + +- Fixes a 1.33 regression that can cause a nil panic in kube-scheduler when aggregating resource requests across container's spec and status. 
([#133285](https://github.com/kubernetes/kubernetes/pull/133285), [@yue9944882](https://github.com/yue9944882)) [SIG Node and Scheduling] + +### Feature + +- Kubernetes is now built using Go 1.24.5 ([#132897](https://github.com/kubernetes/kubernetes/pull/132897), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] + +### Bug or Regression + +- Changed the node restrictions to disallow the node to change its ownerReferences. ([#133468](https://github.com/kubernetes/kubernetes/pull/133468), [@natherz97](https://github.com/natherz97)) [SIG Auth] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +_Nothing has changed._ + +### Removed +_Nothing has changed._ + + + +# v1.33.3 + + +## Downloads for v1.33.3 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes.tar.gz) | 363c52cddaec8b16d6fa00382446907db5d4df262c4ceda293bdcae3bc8033ebe662c4c32fa3f1f66e815b9a4c865ffe93f662f814c10b702359be692c00acfb +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-src.tar.gz) | d23bdc69123f4975a151224c450cbeadc97895f7645563daea67e01915549ea3fb5b31237598abed4fbe5add3c77ffd92e95cbe3f635cf2f4c0626a704f15fca + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-darwin-amd64.tar.gz) | 58fc38f9f7c8952d318ad79139310588e077d2efd5100b586079cbee1cf04211b91d035a897164283bfb792b497139b143dd8bea63b3b538eaa346fb9e9f0379 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-darwin-arm64.tar.gz) | 15adffb9517df740e806698db5c0e973b8a765ef1e999a94e7f60d3598b9fba3b1299b95b5cccb765d94688cd15e153c4a84f4c4f039c45504fd7d3f44e395a2 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-linux-386.tar.gz) | 7cc1891ac0b230ab90e78cb7bad48e0d0ae4cafc88c8563a82de0f79c6d8dbb429bc5f96a540c84bd7334d2d3978d3e81d80949499c8ea6a66fc166cf9b9196c 
+[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-linux-amd64.tar.gz) | d4ef8efe17406ca3234c4628b0b4c14214f77b42056bd7db8298b0ace78305cf641e250572726996437c08bbb298aa7f942c6e748d4293478d11426a42666103 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-linux-arm.tar.gz) | 056378073fc2dd46533202c7d2d8dd3468f07a5853497d220d33827f37959934e10c7e10218e86df99c0b4136935fbab6167dd10586b0ec82caebf7806b99d53 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-linux-arm64.tar.gz) | e5cbf3394c0cab0d4443ed3731bb8010c5e7170bc41fc6bb269f00281643b441491fe4bb121058da8d52d7c87dc32b764e8b3670944b3cd8a1239e3b36430247 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-linux-ppc64le.tar.gz) | 8f5dca8a7390d63f5793067a3900256a2378534683957b9f3ef1e74338f23da4c0466703dd2fe7c6761ded9c5efbd36114a32d8ebacfab52a7a986f29be41f30 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-linux-s390x.tar.gz) | fbc8eaa3e8bd85beb0ca02167ff17ca87fba073e55a8cc55f5595339a7cc33f068af81e4525ba196dbce52d0874de8c5beecad988ea41d9fae69b8740136a26e +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-windows-386.tar.gz) | f3b4d95f0399521d93765b891e49f0c2b57b0d62f59254684cd0495679909306acb07eb630460369bd1335a5c97e786c40bfa3d318cceda04f36d0039ef368eb +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-windows-amd64.tar.gz) | d5953a6589159d69aed70f33d3f8c79d947f97659664ef254ae5a18dc2469899f1a0243d58b36324c246a76cc5ecdff93ddb81d864749185c2d8dd777040bad5 +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-client-windows-arm64.tar.gz) | e126a72af5f56447236996060a29d9c47191b99b2891482d0f681e1a2640416a7f9151d658b579b7af15e0fb2167062d3a7e7062e8c9bca2342f020d1785813a + +### Server Binaries + +filename | sha512 hash +-------- | ----------- 
+[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-server-linux-amd64.tar.gz) | 2098b70d6e328e0c5777a20d95cb7c5f8f3cd9f26960165c0db3135e9ddfb5b22e3f5471a130692dc48185592f4684c9239ed8e505a51984e31604c9a2e9040e +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-server-linux-arm64.tar.gz) | a4b97b9141b49a5bcb2e271b85d03926503c4272689556814cb0714d114ef327c6b209c4b0f0b339475d1bdc9f3dfcaf865c8b4283abaeb0714d2d8602b57f63 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-server-linux-ppc64le.tar.gz) | ab326bb628ba477f18f9a33f5abdcd2f36486146f062b09f3f524f8162e6c3d2736699c463b14ef29cde4b9cae18117a6cbe962a63553b2938a240461605aaea +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-server-linux-s390x.tar.gz) | 8af631c137f65af10129765cdff2697c730ba4ab58b63aea96d73c69e5d4fa2c35ff23416dac24fcadd3f3b856d08cf8223c28b40f4e8a02bb3c698dece6501f + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-node-linux-amd64.tar.gz) | 90d5aa5c08d01febea7f2afe11fb7771568494e68c5cf7b2c1a245b9de24d7962e207efa218ecba45540a2f613b13cf561a8b5f5618f9422042f40a8d7e88988 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-node-linux-arm64.tar.gz) | a631b6236485979c98f1a99553e55e4f6a77bc6fcad444490095872a3516b761ad5097297dd730f1b8fb27bd613af4eea0d4fefc3379fa4724bf4915f8576ecb +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-node-linux-ppc64le.tar.gz) | 342873a2d9eea49bc4b1ca0eca03ba1d019d60a8068bc2f015f5e35f5438e970d8d0722f441778cecf0f72cb5b27082bd1b434fc0d532dc5eaf96533616a8822 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-node-linux-s390x.tar.gz) | b0fa7050445cd4d9ffbe8014f72b44984f47ccb1ba7b6fcb191a0d6a784e4c741d1a04584339e6f09d0aa9568120d22dc4cde95f81f79cb52b13105cf5a57a9c 
+[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.33.3/kubernetes-node-windows-amd64.tar.gz) | 741b4e93de0053586220ac210856dff035c8bb64856f600006be73875a53846f55fb32d9262b3fc6aab7b81cca4b2cfe0d05716fbe9c89e8ab8a9ab4e56ae8e4 + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. + +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.33.3](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/conformance) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/conformance-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/conformance-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/conformance-ppc64le), [s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.33.3](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-apiserver) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-apiserver-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-apiserver-s390x) 
+[registry.k8s.io/kube-controller-manager:v1.33.3](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-controller-manager) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-controller-manager-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.33.3](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-proxy) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-proxy-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.33.3](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-scheduler) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-scheduler-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-scheduler-ppc64le), 
[s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kube-scheduler-s390x) +[registry.k8s.io/kubectl:v1.33.3](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kubectl) | [amd64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kubectl-amd64), [arm64](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kubectl-arm64), [ppc64le](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kubectl-ppc64le), [s390x](https://console.cloud.google.com/artifacts/docker/k8s-artifacts-prod/southamerica-east1/images/kubectl-s390x) + +## Changelog since v1.33.2 + +## Changes by Kind + +### Bug or Regression + +- Fix a bug causing unexpected delay of creating pods for newly created jobs ([#132158](https://github.com/kubernetes/kubernetes/pull/132158), [@linxiulei](https://github.com/linxiulei)) [SIG Apps and Testing] +- Fix regression introduced in 1.33 - where some Paginated LIST calls are falling back to etcd instead of serving from cache. ([#132337](https://github.com/kubernetes/kubernetes/pull/132337), [@hakuna-matatah](https://github.com/hakuna-matatah)) [SIG API Machinery] +- Fix validation for Job with suspend=true, and completions=0 to set the Complete condition. 
([#132728](https://github.com/kubernetes/kubernetes/pull/132728), [@mimowo](https://github.com/mimowo)) [SIG Apps and Testing] +- Kubeadm: fixed issue where etcd member promotion fails with an error saying the member was already promoted ([#132280](https://github.com/kubernetes/kubernetes/pull/132280), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + +### Other (Cleanup or Flake) + +- Reduce logspam when calculating the container resources on linux ([#132272](https://github.com/kubernetes/kubernetes/pull/132272), [@Peac36](https://github.com/Peac36)) [SIG Node] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +_Nothing has changed._ + +### Removed +_Nothing has changed._ + + + # v1.33.2 diff --git a/build/build-image/cross/VERSION b/build/build-image/cross/VERSION index ea4cedcf78fa4..8d1533203d183 100644 --- a/build/build-image/cross/VERSION +++ b/build/build-image/cross/VERSION @@ -1 +1 @@ -v1.33.0-go1.24.4-bullseye.0 +v1.33.0-go1.24.6-bullseye.0 diff --git a/build/common.sh b/build/common.sh index 8612e94612c17..38c20f5ffbb84 100755 --- a/build/common.sh +++ b/build/common.sh @@ -97,8 +97,8 @@ readonly KUBE_RSYNC_PORT="${KUBE_RSYNC_PORT:-}" readonly KUBE_CONTAINER_RSYNC_PORT=8730 # These are the default versions (image tags) for their respective base images. -readonly __default_distroless_iptables_version=v0.7.6 -readonly __default_go_runner_version=v2.4.0-go1.24.4-bookworm.0 +readonly __default_distroless_iptables_version=v0.7.8 +readonly __default_go_runner_version=v2.4.0-go1.24.6-bookworm.0 readonly __default_setcap_version=bookworm-v1.0.4 # These are the base images for the Docker-wrapped binaries. 
diff --git a/build/dependencies.yaml b/build/dependencies.yaml index dca70ade7fa53..a8cf083168255 100644 --- a/build/dependencies.yaml +++ b/build/dependencies.yaml @@ -116,7 +116,7 @@ dependencies: # Golang - name: "golang: upstream version" - version: 1.24.4 + version: 1.24.6 refPaths: - path: .go-version - path: build/build-image/cross/VERSION @@ -139,7 +139,7 @@ dependencies: match: minimum_go_version=go([0-9]+\.[0-9]+) - name: "registry.k8s.io/kube-cross: dependents" - version: v1.33.0-go1.24.4-bullseye.0 + version: v1.33.0-go1.24.6-bullseye.0 refPaths: - path: build/build-image/cross/VERSION @@ -177,7 +177,7 @@ dependencies: match: registry\.k8s\.io\/build-image\/debian-base:[a-zA-Z]+\-v((([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?) - name: "registry.k8s.io/distroless-iptables: dependents" - version: v0.7.6 + version: v0.7.8 refPaths: - path: build/common.sh match: __default_distroless_iptables_version= @@ -185,7 +185,7 @@ dependencies: match: configs\[DistrolessIptables\] = Config{list\.BuildImageRegistry, "distroless-iptables", "v([0-9]+)\.([0-9]+)\.([0-9]+)"} - name: "registry.k8s.io/go-runner: dependents" - version: v2.4.0-go1.24.4-bookworm.0 + version: v2.4.0-go1.24.6-bookworm.0 refPaths: - path: build/common.sh match: __default_go_runner_version= diff --git a/cmd/kubeadm/app/util/config/initconfiguration.go b/cmd/kubeadm/app/util/config/initconfiguration.go index 6b15b6bc5bbb9..8eefdbca3d11b 100644 --- a/cmd/kubeadm/app/util/config/initconfiguration.go +++ b/cmd/kubeadm/app/util/config/initconfiguration.go @@ -381,6 +381,11 @@ func documentMapToInitConfiguration(gvkmap kubeadmapi.DocumentMap, allowDeprecat // If ClusterConfiguration was given, populate it in the InitConfiguration struct if clustercfg != nil { initcfg.ClusterConfiguration = *clustercfg + + // TODO: Workaround for missing v1beta3 ClusterConfiguration timeout conversion. 
Remove this conversion once the v1beta3 is removed + if clustercfg.APIServer.TimeoutForControlPlane.Duration != 0 && clustercfg.APIServer.TimeoutForControlPlane.Duration != kubeadmconstants.ControlPlaneComponentHealthCheckTimeout { + initcfg.Timeouts.ControlPlaneComponentHealthCheck.Duration = clustercfg.APIServer.TimeoutForControlPlane.Duration + } } else { // Populate the internal InitConfiguration.ClusterConfiguration with defaults extclustercfg := &kubeadmapiv1.ClusterConfiguration{} diff --git a/pkg/controller/volume/selinuxwarning/cache/volumecache_test.go b/pkg/controller/volume/selinuxwarning/cache/volumecache_test.go index f951a082b2ecb..5bba301b6920a 100644 --- a/pkg/controller/volume/selinuxwarning/cache/volumecache_test.go +++ b/pkg/controller/volume/selinuxwarning/cache/volumecache_test.go @@ -345,7 +345,7 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) { expectedConflicts: []Conflict{}, }, { - name: "existing volume in a new pod with existing policy and new incomparable label (missing categories)", + name: "existing volume in a new pod with existing policy and new comparable label (missing categories)", initialPods: existingPods, podToAdd: podWithVolume{ podNamespace: "testns", @@ -354,7 +354,16 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) { label: "system_u:system_r:label7", changePolicy: v1.SELinuxChangePolicyMountOption, }, - expectedConflicts: []Conflict{}, + expectedConflicts: []Conflict{ + { + PropertyName: "SELinuxLabel", + EventReason: "SELinuxLabelConflict", + Pod: cache.ObjectName{Namespace: "testns", Name: "testpod"}, + PropertyValue: "system_u:system_r:label7", + OtherPod: cache.ObjectName{Namespace: "ns7", Name: "pod7"}, + OtherPropertyValue: "::label7:c0,c1", + }, + }, }, { name: "existing volume in a new pod with existing policy and new incomparable label (missing everything)", @@ -368,6 +377,27 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) { }, expectedConflicts: []Conflict{}, }, + { + name: 
"existing volume in a new pod with existing policy and new comparable label (missing everything but categories)", + initialPods: existingPods, + podToAdd: podWithVolume{ + podNamespace: "testns", + podName: "testpod", + volumeName: "vol8", + label: "system_u:system_r:label8:c0,c1", + changePolicy: v1.SELinuxChangePolicyMountOption, + }, + expectedConflicts: []Conflict{ + { + PropertyName: "SELinuxLabel", + EventReason: "SELinuxLabelConflict", + Pod: cache.ObjectName{Namespace: "testns", Name: "testpod"}, + PropertyValue: "system_u:system_r:label8:c0,c1", + OtherPod: cache.ObjectName{Namespace: "ns8", Name: "pod8"}, + OtherPropertyValue: "", + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/controller/volume/selinuxwarning/translator/selinux_translator.go b/pkg/controller/volume/selinuxwarning/translator/selinux_translator.go index 88743ef2aed2a..99ce3e97dd7ca 100644 --- a/pkg/controller/volume/selinuxwarning/translator/selinux_translator.go +++ b/pkg/controller/volume/selinuxwarning/translator/selinux_translator.go @@ -60,12 +60,15 @@ func (c *ControllerSELinuxTranslator) SELinuxOptionsToFileLabel(opts *v1.SELinux // Conflicts returns true if two SELinux labels conflict. // These labels must be generated by SELinuxOptionsToFileLabel above // (the function expects strict nr. of elements in the labels). -// Since this translator cannot default missing components, -// the missing components are treated as incomparable and they do not -// conflict with anything. +// Since this translator cannot default missing label components from the operating system, +// the first three components can be empty. In this case, the empty components don't lead to a +// conflict when compared to a real SELinux label and this function returns false (as no +// conflict can be detected). +// The last component (level) is always compared, as it is not defaulted by the operating system. 
// Example: "system_u:system_r:container_t:s0:c1,c2" *does not* conflict with ":::s0:c1,c2", -// because the node that will run such a Pod may expand "":::s0:c1,c2" to "system_u:system_r:container_t:s0:c1,c2". -// However, "system_u:system_r:container_t:s0:c1,c2" *does* conflict with ":::s0:c98,c99". +// because the node that will run such a Pod may expand ":::s0:c1,c2" to "system_u:system_r:container_t:s0:c1,c2". +// However: "system_u:system_r:container_t:s0:c1,c2" *does* conflict with ":::s0:c98,c99". +// And ":::s0:c1,c2" *does* conflict with "" or ":::", because it's never defaulted by the OS. func (c *ControllerSELinuxTranslator) Conflicts(labelA, labelB string) bool { partsA := strings.SplitN(labelA, ":", 4) partsB := strings.SplitN(labelB, ":", 4) @@ -82,16 +85,20 @@ func (c *ControllerSELinuxTranslator) Conflicts(labelA, labelB string) bool { if partsA[i] == partsB[i] { continue } + if i == 3 { + // The last component must always match + return true + } + // i<3, empty parts are incomparable if partsA[i] == "" { - // incomparable part, no conflict continue } if partsB[i] == "" { - // incomparable part, no conflict continue } // Parts are not equal and neither of them is "" -> conflict return true } + return false } diff --git a/pkg/controller/volume/selinuxwarning/translator/selinux_translator_test.go b/pkg/controller/volume/selinuxwarning/translator/selinux_translator_test.go index 5aab3e401555b..5d5d83efe3bb8 100644 --- a/pkg/controller/volume/selinuxwarning/translator/selinux_translator_test.go +++ b/pkg/controller/volume/selinuxwarning/translator/selinux_translator_test.go @@ -93,26 +93,32 @@ func TestLabelsConflict(t *testing.T) { conflict: false, }, { - name: "empty string don't conflict with anything", + name: "empty strings don't conflict with anything except the level", a: "", b: "system_u:system_r:container_t", conflict: false, }, + { + name: "empty string conflicts with level", + a: "", + b: "system_u:system_r:container_t:s0:c1,c2", + conflict: 
true, + }, { name: "empty parts don't conflict with anything", - a: ":::::::::::::", + a: ":::", b: "system_u:system_r:container_t", conflict: false, }, { name: "different lengths don't conflict if the common parts are the same", - a: "system_u:system_r:container_t:c0,c2", - b: "system_u:system_r:container_t", + a: "system_u:system_r:container_t:", + b: "system_u:system_r", conflict: false, }, { name: "different lengths conflict if the common parts differ", - a: "system_u:system_r:conflict_t:c0,c2", + a: "system_u:system_r:conflict_t:", b: "system_u:system_r:container_t", conflict: true, }, @@ -125,9 +131,15 @@ func TestLabelsConflict(t *testing.T) { { name: "non-conflicting empty parts", a: "system_u::container_t", - b: ":system_r::c0,c2", + b: ":system_r::", conflict: false, }, + { + name: "empty level conflicts with non-empty level", + a: ":::s0:c1,c2", + b: "", + conflict: true, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/securitycontext/util.go b/pkg/securitycontext/util.go index 28771b6df27c5..5e000f93333ed 100644 --- a/pkg/securitycontext/util.go +++ b/pkg/securitycontext/util.go @@ -17,6 +17,10 @@ limitations under the License. package securitycontext import ( + "fmt" + "os" + "sync" + v1 "k8s.io/api/core/v1" ) @@ -188,21 +192,32 @@ func AddNoNewPrivileges(sc *v1.SecurityContext) bool { var ( // These *must* be kept in sync with moby/moby. - // https://github.com/moby/moby/blob/master/oci/defaults.go#L105-L124 - // @jessfraz will watch changes to those files upstream. 
- defaultMaskedPaths = []string{ - "/proc/asound", - "/proc/acpi", - "/proc/kcore", - "/proc/keys", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - "/proc/scsi", - "/sys/firmware", - "/sys/devices/virtual/powercap", - } + // https://github.com/moby/moby/blob/ecb03c4cdae6f323150fc11b303dcc5dc4d82416/oci/defaults.go#L190-L218 + defaultMaskedPaths = sync.OnceValue(func() []string { + maskedPaths := []string{ + "/proc/asound", + "/proc/acpi", + "/proc/interrupts", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware", + "/sys/devices/virtual/powercap", + } + + for _, cpu := range possibleCPUs() { + path := fmt.Sprintf("/sys/devices/system/cpu/cpu%d/thermal_throttle", cpu) + if _, err := os.Stat(path); err == nil { + maskedPaths = append(maskedPaths, path) + } + } + + return maskedPaths + }) defaultReadonlyPaths = []string{ "/proc/bus", "/proc/fs", @@ -221,7 +236,7 @@ func ConvertToRuntimeMaskedPaths(opt *v1.ProcMountType) []string { } // Otherwise, add the default masked paths to the runtime security context. - return defaultMaskedPaths + return defaultMaskedPaths() } // ConvertToRuntimeReadonlyPaths converts the ProcMountType to the specified or default diff --git a/pkg/securitycontext/util_darwin.go b/pkg/securitycontext/util_darwin.go new file mode 100644 index 0000000000000..9d14502acb725 --- /dev/null +++ b/pkg/securitycontext/util_darwin.go @@ -0,0 +1,21 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package securitycontext + +func possibleCPUs() []int { + return nil +} diff --git a/pkg/securitycontext/util_linux.go b/pkg/securitycontext/util_linux.go new file mode 100644 index 0000000000000..bcaab4eb3e172 --- /dev/null +++ b/pkg/securitycontext/util_linux.go @@ -0,0 +1,74 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package securitycontext + +import ( + "os" + "runtime" + "strconv" + "strings" + "sync" +) + +// possibleCPUs returns the number of possible CPUs on this host. +func possibleCPUs() (cpus []int) { + if ncpu := possibleCPUsParsed(); ncpu != nil { + return ncpu + } + + for i := range runtime.NumCPU() { + cpus = append(cpus, i) + } + + return cpus +} + +// possibleCPUsParsed is parsing the amount of possible CPUs on this host from +// /sys/devices. 
+var possibleCPUsParsed = sync.OnceValue(func() (cpus []int) { + data, err := os.ReadFile("/sys/devices/system/cpu/possible") + if err != nil { + return nil + } + + ranges := strings.Split(strings.TrimSpace(string(data)), ",") + + for _, r := range ranges { + if rStart, rEnd, ok := strings.Cut(r, "-"); !ok { + cpu, err := strconv.Atoi(rStart) + if err != nil { + return nil + } + cpus = append(cpus, cpu) + } else { + var start, end int + start, err := strconv.Atoi(rStart) + if err != nil { + return nil + } + end, err = strconv.Atoi(rEnd) + if err != nil { + return nil + } + for i := start; i <= end; i++ { + cpus = append(cpus, i) + } + } + } + + return cpus +}) diff --git a/pkg/securitycontext/util_test.go b/pkg/securitycontext/util_test.go index 9711262058a0d..87e086e6c33e5 100644 --- a/pkg/securitycontext/util_test.go +++ b/pkg/securitycontext/util_test.go @@ -73,11 +73,11 @@ func TestConvertToRuntimeMaskedPaths(t *testing.T) { }{ "procMount nil": { pm: nil, - expect: defaultMaskedPaths, + expect: defaultMaskedPaths(), }, "procMount default": { pm: &dPM, - expect: defaultMaskedPaths, + expect: defaultMaskedPaths(), }, "procMount unmasked": { pm: &uPM, diff --git a/pkg/securitycontext/util_windows.go b/pkg/securitycontext/util_windows.go new file mode 100644 index 0000000000000..9d14502acb725 --- /dev/null +++ b/pkg/securitycontext/util_windows.go @@ -0,0 +1,21 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package securitycontext + +func possibleCPUs() []int { + return nil +} diff --git a/plugin/pkg/admission/noderestriction/admission.go b/plugin/pkg/admission/noderestriction/admission.go index b85989dbd28c1..b841c853251e8 100644 --- a/plugin/pkg/admission/noderestriction/admission.go +++ b/plugin/pkg/admission/noderestriction/admission.go @@ -536,6 +536,11 @@ func (p *Plugin) admitNode(nodeName string, a admission.Attributes) error { return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to modify taints", nodeName)) } + // Don't allow a node to update its own ownerReferences. + if !apiequality.Semantic.DeepEqual(node.OwnerReferences, oldNode.OwnerReferences) { + return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to modify ownerReferences", nodeName)) + } + // Don't allow a node to update labels outside the allowed set. // This would allow a node to add or modify its labels in a way that would let it steer privileged workloads to itself. modifiedLabels := getModifiedLabels(node.Labels, oldNode.Labels) diff --git a/plugin/pkg/admission/noderestriction/admission_test.go b/plugin/pkg/admission/noderestriction/admission_test.go index 0cd7c881f1a84..77b077dcd68fd 100644 --- a/plugin/pkg/admission/noderestriction/admission_test.go +++ b/plugin/pkg/admission/noderestriction/admission_test.go @@ -260,10 +260,14 @@ func (a *admitTestCase) run(t *testing.T) { func Test_nodePlugin_Admit(t *testing.T) { var ( - mynode = &user.DefaultInfo{Name: "system:node:mynode", Groups: []string{"system:nodes"}} - bob = &user.DefaultInfo{Name: "bob"} + trueRef = true + mynode = &user.DefaultInfo{Name: "system:node:mynode", Groups: []string{"system:nodes"}} + bob = &user.DefaultInfo{Name: "bob"} + + mynodeObjMeta = metav1.ObjectMeta{Name: "mynode", UID: "mynode-uid"} + mynodeObjMetaOwnerRefA = metav1.ObjectMeta{Name: "mynode", UID: "mynode-uid", OwnerReferences: []metav1.OwnerReference{{Name: "fooerA", Controller: &trueRef}}} + mynodeObjMetaOwnerRefB = 
metav1.ObjectMeta{Name: "mynode", UID: "mynode-uid", OwnerReferences: []metav1.OwnerReference{{Name: "fooerB", Controller: &trueRef}}} - mynodeObjMeta = metav1.ObjectMeta{Name: "mynode", UID: "mynode-uid"} mynodeObj = &api.Node{ObjectMeta: mynodeObjMeta} mynodeObjConfigA = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{ConfigSource: &api.NodeConfigSource{ ConfigMap: &api.ConfigMapNodeConfigSource{ @@ -280,9 +284,11 @@ func Test_nodePlugin_Admit(t *testing.T) { KubeletConfigKey: "kubelet", }}}} - mynodeObjTaintA = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{Taints: []api.Taint{{Key: "mykey", Value: "A"}}}} - mynodeObjTaintB = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{Taints: []api.Taint{{Key: "mykey", Value: "B"}}}} - othernodeObj = &api.Node{ObjectMeta: metav1.ObjectMeta{Name: "othernode"}} + mynodeObjTaintA = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{Taints: []api.Taint{{Key: "mykey", Value: "A"}}}} + mynodeObjTaintB = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{Taints: []api.Taint{{Key: "mykey", Value: "B"}}}} + mynodeObjOwnerRefA = &api.Node{ObjectMeta: mynodeObjMetaOwnerRefA} + mynodeObjOwnerRefB = &api.Node{ObjectMeta: mynodeObjMetaOwnerRefB} + othernodeObj = &api.Node{ObjectMeta: metav1.ObjectMeta{Name: "othernode"}} coremymirrorpod, v1mymirrorpod = makeTestPod("ns", "mymirrorpod", "mynode", true) coreothermirrorpod, v1othermirrorpod = makeTestPod("ns", "othermirrorpod", "othernode", true) @@ -1222,6 +1228,24 @@ func Test_nodePlugin_Admit(t *testing.T) { attributes: admission.NewAttributesRecord(setForbiddenUpdateLabels(mynodeObj, "new"), setForbiddenUpdateLabels(mynodeObj, "old"), nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), err: `is not allowed to modify labels: foo.node-restriction.kubernetes.io/foo, node-restriction.kubernetes.io/foo, other.k8s.io/foo, other.kubernetes.io/foo`, }, + { + name: "forbid update of my 
node: add owner reference", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mynodeObjOwnerRefA, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), + err: "node \"mynode\" is not allowed to modify ownerReferences", + }, + { + name: "forbid update of my node: remove owner reference", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mynodeObj, mynodeObjOwnerRefA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), + err: "node \"mynode\" is not allowed to modify ownerReferences", + }, + { + name: "forbid update of my node: change owner reference", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mynodeObjOwnerRefA, mynodeObjOwnerRefB, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, &metav1.UpdateOptions{}, false, mynode), + err: "node \"mynode\" is not allowed to modify ownerReferences", + }, // Other node object { diff --git a/staging/publishing/rules.yaml b/staging/publishing/rules.yaml index 076e059e33c3e..6b54eaa3fcf57 100644 --- a/staging/publishing/rules.yaml +++ b/staging/publishing/rules.yaml @@ -2901,4 +2901,4 @@ rules: - staging/src/k8s.io/externaljwt recursive-delete-patterns: - '*/.gitattributes' -default-go-version: 1.24.4 +default-go-version: 1.24.6 diff --git a/staging/src/k8s.io/component-helpers/resource/helpers.go b/staging/src/k8s.io/component-helpers/resource/helpers.go index 780db54245168..7ff5bef111db5 100644 --- a/staging/src/k8s.io/component-helpers/resource/helpers.go +++ b/staging/src/k8s.io/component-helpers/resource/helpers.go @@ -404,7 +404,12 @@ func maxResourceList(list, newList v1.ResourceList) { // max returns the result of max(a, b...) 
for each named resource and is only used if we can't // accumulate into an existing resource list func max(a v1.ResourceList, b ...v1.ResourceList) v1.ResourceList { - result := a.DeepCopy() + var result v1.ResourceList + if a != nil { + result = a.DeepCopy() + } else { + result = v1.ResourceList{} + } for _, other := range b { maxResourceList(result, other) } diff --git a/staging/src/k8s.io/component-helpers/resource/helpers_test.go b/staging/src/k8s.io/component-helpers/resource/helpers_test.go index 19849b091138f..5cfdd016d3665 100644 --- a/staging/src/k8s.io/component-helpers/resource/helpers_test.go +++ b/staging/src/k8s.io/component-helpers/resource/helpers_test.go @@ -23,6 +23,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" ) func TestPodRequestsAndLimits(t *testing.T) { @@ -1967,11 +1968,14 @@ func TestIsSupportedPodLevelResource(t *testing.T) { func TestAggregateContainerRequestsAndLimits(t *testing.T) { restartAlways := v1.ContainerRestartPolicyAlways cases := []struct { - containers []v1.Container - initContainers []v1.Container - name string - expectedRequests v1.ResourceList - expectedLimits v1.ResourceList + options PodResourcesOptions + containers []v1.Container + containerStatuses []v1.ContainerStatus + initContainers []v1.Container + initContainerStatuses []v1.ContainerStatus + name string + expectedRequests v1.ResourceList + expectedLimits v1.ResourceList }{ { name: "one container with limits", @@ -2135,20 +2139,74 @@ func TestAggregateContainerRequestsAndLimits(t *testing.T) { v1.ResourceName(v1.ResourceCPU): resource.MustParse("17"), }, }, + { + name: "regularcontainers with empty requests, but status with non-empty requests", + options: PodResourcesOptions{UseStatusResources: true}, + containers: []v1.Container{ + { + Name: "container-1", + Resources: v1.ResourceRequirements{}, + }, + }, + containerStatuses: []v1.ContainerStatus{ + { + Name: "container-1", 
+ Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + expectedRequests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, + expectedLimits: v1.ResourceList{}, + }, + { + name: "always-restart init containers with empty requests, but status with non-empty requests", + options: PodResourcesOptions{UseStatusResources: true}, + initContainers: []v1.Container{ + { + Name: "container-1", + RestartPolicy: ptr.To[v1.ContainerRestartPolicy](v1.ContainerRestartPolicyAlways), + Resources: v1.ResourceRequirements{}, + }, + }, + initContainerStatuses: []v1.ContainerStatus{ + { + Name: "container-1", + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + expectedRequests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, + expectedLimits: v1.ResourceList{}, + }, } for idx, tc := range cases { - testPod := &v1.Pod{Spec: v1.PodSpec{Containers: tc.containers, InitContainers: tc.initContainers}} - resRequests := AggregateContainerRequests(testPod, PodResourcesOptions{}) - resLimits := AggregateContainerLimits(testPod, PodResourcesOptions{}) + t.Run(tc.name, func(t *testing.T) { + testPod := &v1.Pod{ + Spec: v1.PodSpec{Containers: tc.containers, InitContainers: tc.initContainers}, + Status: v1.PodStatus{ContainerStatuses: tc.containerStatuses, InitContainerStatuses: tc.initContainerStatuses}, + } + resRequests := AggregateContainerRequests(testPod, tc.options) + resLimits := AggregateContainerLimits(testPod, tc.options) - if !equality.Semantic.DeepEqual(tc.expectedRequests, resRequests) { - t.Errorf("test case failure[%d]: %v, requests:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedRequests, resRequests) - } + if !equality.Semantic.DeepEqual(tc.expectedRequests, resRequests) { + t.Errorf("test case failure[%d]: %v, requests:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedRequests, 
resRequests) + } - if !equality.Semantic.DeepEqual(tc.expectedLimits, resLimits) { - t.Errorf("test case failure[%d]: %v, limits:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedLimits, resLimits) - } + if !equality.Semantic.DeepEqual(tc.expectedLimits, resLimits) { + t.Errorf("test case failure[%d]: %v, limits:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedLimits, resLimits) + } + }) } } diff --git a/test/e2e/common/node/runtime.go b/test/e2e/common/node/runtime.go index fedf1241c28da..8caf4cf4c40a4 100644 --- a/test/e2e/common/node/runtime.go +++ b/test/e2e/common/node/runtime.go @@ -19,13 +19,10 @@ package node import ( "context" "fmt" - "os" "path" "time" v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -262,7 +259,7 @@ while true; do sleep 1; done // Images used for ConformanceContainer are not added into NodePrePullImageList, because this test is // testing image pulling, these images don't need to be prepulled. The ImagePullPolicy // is v1.PullAlways, so it won't be blocked by framework image pre-pull list check. - imagePullTest := func(ctx context.Context, image string, hasSecret bool, expectedPhase v1.PodPhase, expectedPullStatus bool, windowsImage bool) { + imagePullTest := func(ctx context.Context, image string, expectedPhase v1.PodPhase, expectedPullStatus bool, windowsImage bool) { command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"} if windowsImage { // -t: Ping the specified host until stopped. 
@@ -278,34 +275,7 @@ while true; do sleep 1; done }, RestartPolicy: v1.RestartPolicyNever, } - if hasSecret { - // The service account only has pull permission - auth := ` -{ - "auths": { - "https://gcr.io": { - "auth": "X2pzb25fa2V5OnsKICAidHlwZSI6ICJzZXJ2aWNlX2FjY291bnQiLAogICJwcm9qZWN0X2lkIjogImF1dGhlbnRpY2F0ZWQtaW1hZ2UtcHVsbGluZyIsCiAgInByaXZhdGVfa2V5X2lkIjogImI5ZjJhNjY0YWE5YjIwNDg0Y2MxNTg2MDYzZmVmZGExOTIyNGFjM2IiLAogICJwcml2YXRlX2tleSI6ICItLS0tLUJFR0lOIFBSSVZBVEUgS0VZLS0tLS1cbk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzdTSG5LVEVFaVlMamZcbkpmQVBHbUozd3JCY2VJNTBKS0xxS21GWE5RL3REWGJRK2g5YVl4aldJTDhEeDBKZTc0bVovS01uV2dYRjVLWlNcbm9BNktuSU85Yi9SY1NlV2VpSXRSekkzL1lYVitPNkNjcmpKSXl4anFWam5mVzJpM3NhMzd0OUE5VEZkbGZycm5cbjR6UkpiOWl4eU1YNGJMdHFGR3ZCMDNOSWl0QTNzVlo1ODhrb1FBZmgzSmhhQmVnTWorWjRSYko0aGVpQlFUMDNcbnZVbzViRWFQZVQ5RE16bHdzZWFQV2dydDZOME9VRGNBRTl4bGNJek11MjUzUG4vSzgySFpydEx4akd2UkhNVXhcbng0ZjhwSnhmQ3h4QlN3Z1NORit3OWpkbXR2b0wwRmE3ZGducFJlODZWRDY2ejNZenJqNHlLRXRqc2hLZHl5VWRcbkl5cVhoN1JSQWdNQkFBRUNnZ0VBT3pzZHdaeENVVlFUeEFka2wvSTVTRFVidi9NazRwaWZxYjJEa2FnbmhFcG9cbjFJajJsNGlWMTByOS9uenJnY2p5VlBBd3pZWk1JeDFBZVF0RDdoUzRHWmFweXZKWUc3NkZpWFpQUm9DVlB6b3VcbmZyOGRDaWFwbDV0enJDOWx2QXNHd29DTTdJWVRjZmNWdDdjRTEyRDNRS3NGNlo3QjJ6ZmdLS251WVBmK0NFNlRcbmNNMHkwaCtYRS9kMERvSERoVy96YU1yWEhqOFRvd2V1eXRrYmJzNGYvOUZqOVBuU2dET1lQd2xhbFZUcitGUWFcbkpSd1ZqVmxYcEZBUW14M0Jyd25rWnQzQ2lXV2lGM2QrSGk5RXRVYnRWclcxYjZnK1JRT0licWFtcis4YlJuZFhcbjZWZ3FCQWtKWjhSVnlkeFVQMGQxMUdqdU9QRHhCbkhCbmM0UW9rSXJFUUtCZ1FEMUNlaWN1ZGhXdGc0K2dTeGJcbnplanh0VjFONDFtZHVjQnpvMmp5b1dHbzNQVDh3ckJPL3lRRTM0cU9WSi9pZCs4SThoWjRvSWh1K0pBMDBzNmdcblRuSXErdi9kL1RFalk4MW5rWmlDa21SUFdiWHhhWXR4UjIxS1BYckxOTlFKS2ttOHRkeVh5UHFsOE1veUdmQ1dcbjJ2aVBKS05iNkhabnY5Q3lqZEo5ZzJMRG5RS0JnUUREcVN2eURtaGViOTIzSW96NGxlZ01SK205Z2xYVWdTS2dcbkVzZlllbVJmbU5XQitDN3ZhSXlVUm1ZNU55TXhmQlZXc3dXRldLYXhjK0krYnFzZmx6elZZdFpwMThNR2pzTURcbmZlZWZBWDZCWk1zVXQ3Qmw3WjlWSjg1bnRFZHFBQ0xwWitaLzN0SVJWdWdDV1pRMWhrbmxHa0dUMDI0SkVFKytcbk55SDFnM2QzUlFLQmd
RQ1J2MXdKWkkwbVBsRklva0tGTkh1YTBUcDNLb1JTU1hzTURTVk9NK2xIckcxWHJtRjZcbkMwNGNTKzQ0N0dMUkxHOFVUaEpKbTRxckh0Ti9aK2dZOTYvMm1xYjRIakpORDM3TVhKQnZFYTN5ZUxTOHEvK1JcbjJGOU1LamRRaU5LWnhQcG84VzhOSlREWTVOa1BaZGh4a2pzSHdVNGRTNjZwMVRESUU0MGd0TFpaRFFLQmdGaldcbktyblFpTnEzOS9iNm5QOFJNVGJDUUFKbmR3anhTUU5kQTVmcW1rQTlhRk9HbCtqamsxQ1BWa0tNSWxLSmdEYkpcbk9heDl2OUc2Ui9NSTFIR1hmV3QxWU56VnRocjRIdHNyQTB0U3BsbWhwZ05XRTZWejZuQURqdGZQSnMyZUdqdlhcbmpQUnArdjhjY21MK3dTZzhQTGprM3ZsN2VlNXJsWWxNQndNdUdjUHhBb0dBZWRueGJXMVJMbVZubEFpSEx1L0xcbmxtZkF3RFdtRWlJMFVnK1BMbm9Pdk81dFE1ZDRXMS94RU44bFA0cWtzcGtmZk1Rbk5oNFNZR0VlQlQzMlpxQ1RcbkpSZ2YwWGpveXZ2dXA5eFhqTWtYcnBZL3ljMXpmcVRaQzBNTzkvMVVjMWJSR2RaMmR5M2xSNU5XYXA3T1h5Zk9cblBQcE5Gb1BUWGd2M3FDcW5sTEhyR3pNPVxuLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLVxuIiwKICAiY2xpZW50X2VtYWlsIjogImltYWdlLXB1bGxpbmdAYXV0aGVudGljYXRlZC1pbWFnZS1wdWxsaW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjExMzc5NzkxNDUzMDA3MzI3ODcxMiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L2ltYWdlLXB1bGxpbmclNDBhdXRoZW50aWNhdGVkLWltYWdlLXB1bGxpbmcuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iCn0=", - "email": "image-pulling@authenticated-image-pulling.iam.gserviceaccount.com" - } - } -}` - // we might be told to use a different docker config JSON. 
- if framework.TestContext.DockerConfigFile != "" { - contents, err := os.ReadFile(framework.TestContext.DockerConfigFile) - framework.ExpectNoError(err) - auth = string(contents) - } - secret := &v1.Secret{ - Data: map[string][]byte{v1.DockerConfigJsonKey: []byte(auth)}, - Type: v1.SecretTypeDockerConfigJson, - } - secret.Name = "image-pull-secret-" + string(uuid.NewUUID()) - ginkgo.By("create image pull secret") - _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}) - framework.ExpectNoError(err) - ginkgo.DeferCleanup(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete, secret.Name, metav1.DeleteOptions{}) - container.ImagePullSecrets = []string{secret.Name} - } + // checkContainerStatus checks whether the container status matches expectation. checkContainerStatus := func(ctx context.Context) error { status, err := container.GetStatus(ctx) @@ -370,29 +340,24 @@ while true; do sleep 1; done f.It("should not be able to pull image from invalid registry", f.WithNodeConformance(), func(ctx context.Context) { image := imageutils.GetE2EImage(imageutils.InvalidRegistryImage) - imagePullTest(ctx, image, false, v1.PodPending, true, false) + imagePullTest(ctx, image, v1.PodPending, true, false) }) f.It("should be able to pull image", f.WithNodeConformance(), func(ctx context.Context) { // NOTE(claudiub): The agnhost image is supposed to work on both Linux and Windows. 
image := imageutils.GetE2EImage(imageutils.Agnhost) - imagePullTest(ctx, image, false, v1.PodRunning, false, false) + imagePullTest(ctx, image, v1.PodRunning, false, false) }) + // TODO: https://github.com/kubernetes/kubernetes/issues/130271 + // Switch this to use a locally hosted private image and not depend on this host f.It("should not be able to pull from private registry without secret", f.WithNodeConformance(), func(ctx context.Context) { image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine) - imagePullTest(ctx, image, false, v1.PodPending, true, false) + imagePullTest(ctx, image, v1.PodPending, true, false) }) - f.It("should be able to pull from private registry with secret", f.WithNodeConformance(), func(ctx context.Context) { - image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine) - isWindows := false - if framework.NodeOSDistroIs("windows") { - image = imageutils.GetE2EImage(imageutils.AuthenticatedWindowsNanoServer) - isWindows = true - } - imagePullTest(ctx, image, true, v1.PodRunning, false, isWindows) - }) + // TODO: https://github.com/kubernetes/kubernetes/issues/130271 + // Add a sustainable test for pulling with a private registry secret }) }) }) diff --git a/test/e2e/network/service_cidrs.go b/test/e2e/network/service_cidrs.go index b1b63ba6924d3..b1fcfcde2beae 100644 --- a/test/e2e/network/service_cidrs.go +++ b/test/e2e/network/service_cidrs.go @@ -167,33 +167,6 @@ var _ = common.SIGDescribe("ServiceCIDR and IPAddress API", func() { framework.Failf("unexpected error getting default ServiceCIDR: %v", err) } - ginkgo.By("patching") - patchedServiceCIDR, err := f.ClientSet.NetworkingV1().ServiceCIDRs().Patch(ctx, defaultservicecidr.DefaultServiceCIDRName, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) - if err != nil { - framework.Failf("unexpected error patching IPAddress: %v", err) - } - if v, ok := patchedServiceCIDR.Annotations["patched"]; !ok || v != "true" { - 
framework.Failf("patched object should have the applied annotation") - } - - ginkgo.By("updating") - var cidrToUpdate, updatedCIDR *networkingv1.ServiceCIDR - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - cidrToUpdate, err = f.ClientSet.NetworkingV1().ServiceCIDRs().Get(ctx, defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{}) - if err != nil { - return err - } - cidrToUpdate.Annotations["updated"] = "true" - updatedCIDR, err = f.ClientSet.NetworkingV1().ServiceCIDRs().Update(ctx, cidrToUpdate, metav1.UpdateOptions{}) - return err - }) - if err != nil { - framework.Failf("unexpected error updating IPAddress: %v", err) - } - if v, ok := updatedCIDR.Annotations["updated"]; !ok || v != "true" { - framework.Failf("updated object should have the applied annotation") - } - ginkgo.By("listing") list, err := f.ClientSet.NetworkingV1().ServiceCIDRs().List(ctx, metav1.ListOptions{}) if err != nil { diff --git a/test/e2e/storage/csimock/base.go b/test/e2e/storage/csimock/base.go index d58a7b33c57c5..a75761450b62b 100644 --- a/test/e2e/storage/csimock/base.go +++ b/test/e2e/storage/csimock/base.go @@ -463,7 +463,7 @@ func (m *mockDriverSetup) createPodWithFSGroup(ctx context.Context, fsGroup *int return class, claim, pod } -func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes []v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions, policy *v1.PodSELinuxChangePolicy) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { +func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes []v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions, policy *v1.PodSELinuxChangePolicy, privileged bool) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { ginkgo.By("Creating pod with SELinux context") f := m.f nodeSelection := m.config.ClientNodeSelection @@ -480,7 +480,7 @@ func (m *mockDriverSetup) createPodWithSELinux(ctx 
context.Context, accessModes ReclaimPolicy: m.tp.reclaimPolicy, } class, claim := createClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, accessModes) - pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts, policy) + pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts, policy, privileged) framework.ExpectNoError(err, "Failed to create pause pod with SELinux context %s: %v", seLinuxOpts, err) if class != nil { @@ -802,7 +802,7 @@ func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.Vol return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) } -func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, seLinuxOpts *v1.SELinuxOptions, policy *v1.PodSELinuxChangePolicy) (*v1.Pod, error) { +func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, seLinuxOpts *v1.SELinuxOptions, policy *v1.PodSELinuxChangePolicy, privileged bool) (*v1.Pod, error) { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "pvc-volume-tester-", @@ -816,6 +816,9 @@ func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentV { Name: "volume-tester", Image: imageutils.GetE2EImage(imageutils.Pause), + SecurityContext: &v1.SecurityContext{ + Privileged: &privileged, + }, VolumeMounts: []v1.VolumeMount{ { Name: "my-volume", diff --git a/test/e2e/storage/csimock/csi_selinux_mount.go b/test/e2e/storage/csimock/csi_selinux_mount.go index c3ed53418e1d2..3a7045cbaf905 100644 --- a/test/e2e/storage/csimock/csi_selinux_mount.go +++ b/test/e2e/storage/csimock/csi_selinux_mount.go @@ -298,7 +298,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { // Act ginkgo.By("Starting the initial pod") accessModes := 
[]v1.PersistentVolumeAccessMode{t.volumeMode} - _, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts, t.firstPodChangePolicy) + _, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts, t.firstPodChangePolicy, false /* privileged */) err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "starting the initial pod") @@ -331,7 +331,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "getting the initial pod") nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName} - pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts, t.secondPodChangePolicy) + pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts, t.secondPodChangePolicy, false /* privileged */) framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts) m.pods = append(m.pods, pod2) @@ -453,8 +453,10 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC csiDriverSELinuxEnabled bool firstPodSELinuxOpts *v1.SELinuxOptions firstPodChangePolicy *v1.PodSELinuxChangePolicy + firstPodPrivileged bool secondPodSELinuxOpts *v1.SELinuxOptions secondPodChangePolicy *v1.PodSELinuxChangePolicy + secondPodPrivileged bool volumeMode v1.PersistentVolumeAccessMode waitForSecondPodStart bool secondPodFailureEvent string @@ -599,7 +601,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, }, { - name: "error is not bumped on two Pods with a different policy RWX volume (nil + MountOption)", + name: "error is not bumped on two Pods with the same policy RWX 
volume (nil + MountOption)", csiDriverSELinuxEnabled: true, firstPodSELinuxOpts: &seLinuxOpts1, firstPodChangePolicy: &mount, @@ -611,7 +613,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, }, { - name: "error is not bumped on two Pods with a different policy RWX volume (MountOption + MountOption)", + name: "error is not bumped on two Pods with the same policy RWX volume (MountOption + MountOption)", csiDriverSELinuxEnabled: true, firstPodSELinuxOpts: &seLinuxOpts1, firstPodChangePolicy: &mount, @@ -648,6 +650,75 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC expectControllerConflictProperty: "SELinuxLabel", testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, }, + { + name: "error is not bumped on two privileged Pods with mount policy RWO volume", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + firstPodPrivileged: true, + firstPodChangePolicy: &recursive, + secondPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + secondPodPrivileged: true, + secondPodChangePolicy: &recursive, + volumeMode: v1.ReadWriteOnce, + waitForSecondPodStart: true, + expectNodeIncreases: sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ), + testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, + }, + { + name: "error is not bumped on two privileged Pods with recursive policy RWO volume", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + firstPodPrivileged: true, + firstPodChangePolicy: &mount, + secondPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + secondPodPrivileged: true, + secondPodChangePolicy: &mount, + volumeMode: 
v1.ReadWriteOnce, + waitForSecondPodStart: true, + expectNodeIncreases: sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ), + testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, + }, + { + name: "error is not bumped on a privileged and unprivileged Pod with given SELinux context and recursive policy", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + firstPodPrivileged: true, + secondPodSELinuxOpts: &seLinuxOpts1, + secondPodChangePolicy: &recursive, + secondPodPrivileged: false, + volumeMode: v1.ReadWriteMany, + waitForSecondPodStart: true, + expectNodeIncreases: sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ), + testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, + }, + { + name: "error is bumped on a privileged and unprivileged Pod with given SELinux with MountOption policy", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + firstPodPrivileged: true, + secondPodSELinuxOpts: &seLinuxOpts1, + secondPodChangePolicy: &mount, + secondPodFailureEvent: "conflicting SELinux labels of volume", + volumeMode: v1.ReadWriteOncePod, + waitForSecondPodStart: false, + expectNodeIncreases: sets.New[string]("volume_manager_selinux_volume_context_mismatch_errors_total"), + expectControllerConflictProperty: "SELinuxLabel", + testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, + }, + { + name: "error is bumped on an unprivileged and privileged Pod with given SELinux with MountOption policy", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: &seLinuxOpts1, + firstPodChangePolicy: &mount, + secondPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + secondPodPrivileged: true, + secondPodFailureEvent: "conflicting 
SELinux labels of volume", + volumeMode: v1.ReadWriteOncePod, + waitForSecondPodStart: false, + expectNodeIncreases: sets.New[string]("volume_manager_selinux_volume_context_mismatch_errors_total"), + expectControllerConflictProperty: "SELinuxLabel", + testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, + }, } for _, t := range tests { t := t @@ -673,7 +744,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC ginkgo.By("Starting the first pod") accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode} - _, claim, pod := m.createPodWithSELinux(ctx, accessModes, []string{}, t.firstPodSELinuxOpts, t.firstPodChangePolicy) + _, claim, pod := m.createPodWithSELinux(ctx, accessModes, []string{}, t.firstPodSELinuxOpts, t.firstPodChangePolicy, t.firstPodPrivileged) err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "starting the initial pod") @@ -688,7 +759,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC ginkgo.By("Starting the second pod") // Skip scheduler, it would block scheduling the second pod with ReadWriteOncePod PV. 
nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName} - pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts, t.secondPodChangePolicy) + pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts, t.secondPodChangePolicy, t.secondPodPrivileged) framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts) m.pods = append(m.pods, pod2) diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go index 60bbeae8968bf..67853d638ff69 100644 --- a/test/e2e/storage/external/external.go +++ b/test/e2e/storage/external/external.go @@ -152,6 +152,12 @@ type driverDefinition struct { // use topology to ensure that pods land on the right node(s). ClientNodeName string + // NodeSelectors is used to specify nodeSelector information for pod deployment + // during the tests. This is beneficial when needing to control placement + // for specialized environments. Most drivers should not need this and + // instead can use topolgy to ensure that pods land on the right node(s). + NodeSelectors map[string]string + // Timeouts contains the custom timeouts used during the test execution. // The values specified here will override the default values specified in // the framework.TimeoutContext struct. 
@@ -493,5 +499,10 @@ func (d *driverDefinition) PrepareTest(ctx context.Context, f *framework.Framewo e2econfig.ClientNodeSelection.Selector = map[string]string{"kubernetes.io/os": "linux"} } + // Add all provided nodeSelector settings + for key, value := range d.NodeSelectors { + e2econfig.ClientNodeSelection.Selector[key] = value + } + return e2econfig } diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go deleted file mode 100644 index 0aa256d40030f..0000000000000 --- a/test/e2e_node/runtime_conformance_test.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package e2enode - -import ( - "context" - "fmt" - "os" - "path/filepath" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/kubelet/images" - "k8s.io/kubernetes/test/e2e/common/node" - "k8s.io/kubernetes/test/e2e/framework" - e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - "k8s.io/kubernetes/test/e2e_node/services" - admissionapi "k8s.io/pod-security-admission/api" - - "github.com/onsi/ginkgo/v2" -) - -var _ = SIGDescribe("Container Runtime Conformance Test", func() { - f := framework.NewDefaultFramework("runtime-conformance") - f.NamespacePodSecurityLevel = admissionapi.LevelBaseline - - ginkgo.Describe("container runtime conformance blackbox test", func() { - - ginkgo.Context("when running a container with a new image", func() { - // The service account only has pull permission - auth := ` -{ - "auths": { - "https://gcr.io": { - "auth": "X2pzb25fa2V5OnsKICAidHlwZSI6ICJzZXJ2aWNlX2FjY291bnQiLAogICJwcm9qZWN0X2lkIjogImF1dGhlbnRpY2F0ZWQtaW1hZ2UtcHVsbGluZyIsCiAgInByaXZhdGVfa2V5X2lkIjogImI5ZjJhNjY0YWE5YjIwNDg0Y2MxNTg2MDYzZmVmZGExOTIyNGFjM2IiLAogICJwcml2YXRlX2tleSI6ICItLS0tLUJFR0lOIFBSSVZBVEUgS0VZLS0tLS1cbk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzdTSG5LVEVFaVlMamZcbkpmQVBHbUozd3JCY2VJNTBKS0xxS21GWE5RL3REWGJRK2g5YVl4aldJTDhEeDBKZTc0bVovS01uV2dYRjVLWlNcbm9BNktuSU85Yi9SY1NlV2VpSXRSekkzL1lYVitPNkNjcmpKSXl4anFWam5mVzJpM3NhMzd0OUE5VEZkbGZycm5cbjR6UkpiOWl4eU1YNGJMdHFGR3ZCMDNOSWl0QTNzVlo1ODhrb1FBZmgzSmhhQmVnTWorWjRSYko0aGVpQlFUMDNcbnZVbzViRWFQZVQ5RE16bHdzZWFQV2dydDZOME9VRGNBRTl4bGNJek11MjUzUG4vSzgySFpydEx4akd2UkhNVXhcbng0ZjhwSnhmQ3h4QlN3Z1NORit3OWpkbXR2b0wwRmE3ZGducFJlODZWRDY2ejNZenJqNHlLRXRqc2hLZHl5VWRcbkl5cVhoN1JSQWdNQkFBRUNnZ0VBT3pzZHdaeENVVlFUeEFka2wvSTVTRFVidi9NazRwaWZxYjJEa2FnbmhFcG9cbjFJajJsNGlWMTByOS9uenJnY2p5VlBBd3pZWk1JeDFBZVF0RDdoUzRHWmFweXZKWUc3NkZpWFpQUm9DVlB6b3VcbmZyOGRDaWFwbDV0enJDOWx2QXNHd29DTTdJWVRjZmNWdDdjRTEyRDNRS3NGNlo3QjJ6ZmdLS251WVBmK0NFNlRcbmNNMHkwaCtYRS9kMERvSERoVy96YU1yWEhqOFRvd2V1eXRrYmJzNGYvOUZqOVBuU2dET1
lQd2xhbFZUcitGUWFcbkpSd1ZqVmxYcEZBUW14M0Jyd25rWnQzQ2lXV2lGM2QrSGk5RXRVYnRWclcxYjZnK1JRT0licWFtcis4YlJuZFhcbjZWZ3FCQWtKWjhSVnlkeFVQMGQxMUdqdU9QRHhCbkhCbmM0UW9rSXJFUUtCZ1FEMUNlaWN1ZGhXdGc0K2dTeGJcbnplanh0VjFONDFtZHVjQnpvMmp5b1dHbzNQVDh3ckJPL3lRRTM0cU9WSi9pZCs4SThoWjRvSWh1K0pBMDBzNmdcblRuSXErdi9kL1RFalk4MW5rWmlDa21SUFdiWHhhWXR4UjIxS1BYckxOTlFKS2ttOHRkeVh5UHFsOE1veUdmQ1dcbjJ2aVBKS05iNkhabnY5Q3lqZEo5ZzJMRG5RS0JnUUREcVN2eURtaGViOTIzSW96NGxlZ01SK205Z2xYVWdTS2dcbkVzZlllbVJmbU5XQitDN3ZhSXlVUm1ZNU55TXhmQlZXc3dXRldLYXhjK0krYnFzZmx6elZZdFpwMThNR2pzTURcbmZlZWZBWDZCWk1zVXQ3Qmw3WjlWSjg1bnRFZHFBQ0xwWitaLzN0SVJWdWdDV1pRMWhrbmxHa0dUMDI0SkVFKytcbk55SDFnM2QzUlFLQmdRQ1J2MXdKWkkwbVBsRklva0tGTkh1YTBUcDNLb1JTU1hzTURTVk9NK2xIckcxWHJtRjZcbkMwNGNTKzQ0N0dMUkxHOFVUaEpKbTRxckh0Ti9aK2dZOTYvMm1xYjRIakpORDM3TVhKQnZFYTN5ZUxTOHEvK1JcbjJGOU1LamRRaU5LWnhQcG84VzhOSlREWTVOa1BaZGh4a2pzSHdVNGRTNjZwMVRESUU0MGd0TFpaRFFLQmdGaldcbktyblFpTnEzOS9iNm5QOFJNVGJDUUFKbmR3anhTUU5kQTVmcW1rQTlhRk9HbCtqamsxQ1BWa0tNSWxLSmdEYkpcbk9heDl2OUc2Ui9NSTFIR1hmV3QxWU56VnRocjRIdHNyQTB0U3BsbWhwZ05XRTZWejZuQURqdGZQSnMyZUdqdlhcbmpQUnArdjhjY21MK3dTZzhQTGprM3ZsN2VlNXJsWWxNQndNdUdjUHhBb0dBZWRueGJXMVJMbVZubEFpSEx1L0xcbmxtZkF3RFdtRWlJMFVnK1BMbm9Pdk81dFE1ZDRXMS94RU44bFA0cWtzcGtmZk1Rbk5oNFNZR0VlQlQzMlpxQ1RcbkpSZ2YwWGpveXZ2dXA5eFhqTWtYcnBZL3ljMXpmcVRaQzBNTzkvMVVjMWJSR2RaMmR5M2xSNU5XYXA3T1h5Zk9cblBQcE5Gb1BUWGd2M3FDcW5sTEhyR3pNPVxuLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLVxuIiwKICAiY2xpZW50X2VtYWlsIjogImltYWdlLXB1bGxpbmdAYXV0aGVudGljYXRlZC1pbWFnZS1wdWxsaW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjExMzc5NzkxNDUzMDA3MzI3ODcxMiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L2ltYWdlLXB1bGxpbmclNDBhdXRoZW50aWNhdGVkLWltYWdlLX
B1bGxpbmcuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iCn0=", - "email": "image-pulling@authenticated-image-pulling.iam.gserviceaccount.com" - } - } -}` - // The following images are not added into NodePrePullImageList, because this test is - // testing image pulling, these images don't need to be prepulled. The ImagePullPolicy - // is v1.PullAlways, so it won't be blocked by framework image pre-pull list check. - for _, testCase := range []struct { - description string - image string - phase v1.PodPhase - waiting bool - }{ - { - description: "should be able to pull from private registry with credential provider", - image: "gcr.io/authenticated-image-pulling/alpine:3.7", - phase: v1.PodRunning, - waiting: false, - }, - } { - testCase := testCase - f.It(testCase.description+"", f.WithNodeConformance(), func(ctx context.Context) { - name := "image-pull-test" - command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"} - container := node.ConformanceContainer{ - PodClient: e2epod.NewPodClient(f), - Container: v1.Container{ - Name: name, - Image: testCase.image, - Command: command, - // PullAlways makes sure that the image will always be pulled even if it is present before the test. - ImagePullPolicy: v1.PullAlways, - }, - RestartPolicy: v1.RestartPolicyNever, - } - - configFile := filepath.Join(services.KubeletRootDirectory, "config.json") - err := os.WriteFile(configFile, []byte(auth), 0644) - framework.ExpectNoError(err) - defer os.Remove(configFile) - - // checkContainerStatus checks whether the container status matches expectation. - checkContainerStatus := func(ctx context.Context) error { - status, err := container.GetStatus(ctx) - if err != nil { - return fmt.Errorf("failed to get container status: %w", err) - } - // We need to check container state first. The default pod status is pending, If we check - // pod phase first, and the expected pod phase is Pending, the container status may not - // even show up when we check it. 
- // Check container state - if !testCase.waiting { - if status.State.Running == nil { - return fmt.Errorf("expected container state: Running, got: %q", - node.GetContainerState(status.State)) - } - } - if testCase.waiting { - if status.State.Waiting == nil { - return fmt.Errorf("expected container state: Waiting, got: %q", - node.GetContainerState(status.State)) - } - reason := status.State.Waiting.Reason - if reason != images.ErrImagePull.Error() && - reason != images.ErrImagePullBackOff.Error() { - return fmt.Errorf("unexpected waiting reason: %q", reason) - } - } - // Check pod phase - phase, err := container.GetPhase(ctx) - if err != nil { - return fmt.Errorf("failed to get pod phase: %w", err) - } - if phase != testCase.phase { - return fmt.Errorf("expected pod phase: %q, got: %q", testCase.phase, phase) - } - return nil - } - // The image registry is not stable, which sometimes causes the test to fail. Add retry mechanism to make this - // less flaky. - const flakeRetry = 3 - for i := 1; i <= flakeRetry; i++ { - var err error - ginkgo.By("create the container") - container.Create(ctx) - ginkgo.By("check the container status") - for start := time.Now(); time.Since(start) < node.ContainerStatusRetryTimeout; time.Sleep(node.ContainerStatusPollInterval) { - if err = checkContainerStatus(ctx); err == nil { - break - } - } - ginkgo.By("delete the container") - _ = container.Delete(ctx) - if err == nil { - break - } - if i < flakeRetry { - framework.Logf("No.%d attempt failed: %v, retrying...", i, err) - } else { - framework.Failf("All %d attempts failed: %v", flakeRetry, err) - } - } - }) - } - }) - }) -}) diff --git a/test/images/.permitted-images b/test/images/.permitted-images index ec7dac61ab5c3..042af1417c8b7 100644 --- a/test/images/.permitted-images +++ b/test/images/.permitted-images @@ -4,7 +4,6 @@ # The sources for which are in test/images/agnhost. # If agnhost is missing functionality for your tests, please reach out to SIG Testing. 
gcr.io/authenticated-image-pulling/alpine -gcr.io/authenticated-image-pulling/windows-nanoserver gcr.io/k8s-authenticated-test/agnhost invalid.registry.k8s.io/invalid/alpine registry.k8s.io/build-image/distroless-iptables diff --git a/test/images/Makefile b/test/images/Makefile index 112ccc7a87106..ccbb969f01cfc 100644 --- a/test/images/Makefile +++ b/test/images/Makefile @@ -16,7 +16,7 @@ REGISTRY ?= registry.k8s.io/e2e-test-images GOARM ?= 7 DOCKER_CERT_BASE_PATH ?= QEMUVERSION=v5.1.0-2 -GOLANG_VERSION=1.24.4 +GOLANG_VERSION=1.24.6 export ifndef WHAT diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index 7574ceaf6b247..0738453b151bf 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -129,13 +129,17 @@ func readFromURL(url string, writer io.Writer) error { var ( initRegistry = RegistryList{ - GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling", - PromoterE2eRegistry: "registry.k8s.io/e2e-test-images", - BuildImageRegistry: "registry.k8s.io/build-image", - InvalidRegistry: "invalid.registry.k8s.io/invalid", - GcEtcdRegistry: "registry.k8s.io", - GcRegistry: "registry.k8s.io", - SigStorageRegistry: "registry.k8s.io/sig-storage", + // TODO: https://github.com/kubernetes/kubernetes/issues/130271 + // Eliminate GcAuthenticatedRegistry. + GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling", + PromoterE2eRegistry: "registry.k8s.io/e2e-test-images", + BuildImageRegistry: "registry.k8s.io/build-image", + InvalidRegistry: "invalid.registry.k8s.io/invalid", + GcEtcdRegistry: "registry.k8s.io", + GcRegistry: "registry.k8s.io", + SigStorageRegistry: "registry.k8s.io/sig-storage", + // TODO: https://github.com/kubernetes/kubernetes/issues/130271 + // Eliminate PrivateRegistry. 
PrivateRegistry: "gcr.io/k8s-authenticated-test", DockerLibraryRegistry: "docker.io/library", CloudProviderGcpRegistry: "registry.k8s.io/cloud-provider-gcp", @@ -152,15 +156,17 @@ const ( // Agnhost image Agnhost // AgnhostPrivate image + // TODO: https://github.com/kubernetes/kubernetes/issues/130271 + // Eliminate this. AgnhostPrivate // APIServer image APIServer // AppArmorLoader image AppArmorLoader // AuthenticatedAlpine image + // TODO: https://github.com/kubernetes/kubernetes/issues/130271 + // Eliminate this. AuthenticatedAlpine - // AuthenticatedWindowsNanoServer image - AuthenticatedWindowsNanoServer // BusyBox image BusyBox // DistrolessIptables Image @@ -219,11 +225,10 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.53"} configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"} configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"} - configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"} configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.29.2"} configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"} configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"} - configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.6"} + configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.8"} configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.21-0"} configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"} configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"} @@ -270,7 +275,7 @@ func GetMappedImageConfigs(originalImageConfigs map[ImageID]Config, repo string) for i, config := range originalImageConfigs { switch i { case InvalidRegistryImage, AuthenticatedAlpine, - AuthenticatedWindowsNanoServer, 
AgnhostPrivate: + AgnhostPrivate: // These images are special and can't be run out of the cloud - some because they // are authenticated, and others because they are not real images. Tests that depend // on these images can't be run without access to the public internet.