diff --git a/README.md b/README.md index adc2913..e016df4 100644 --- a/README.md +++ b/README.md @@ -197,8 +197,8 @@ No modules. | [network\_id](#input\_network\_id) | The ID of the cluster network. | `string` | n/a | yes | | [network\_policy\_provider](#input\_network\_policy\_provider) | Network policy provider for Kubernetes cluster | `string` | `"CALICO"` | no | | [node\_account\_name](#input\_node\_account\_name) | IAM node account name. | `string` | `"k8s-node-account"` | no | -| [node\_groups](#input\_node\_groups) | Kubernetes node groups map of maps. It could contain all parameters of nebius\_kubernetes\_node\_group resource,
many of them could be NULL and have default values.

Notes:
- If node groups version isn't defined, cluster version will be used instead of.
- A master locations list must have only one location for zonal cluster and three locations for a regional.
- All node groups are able to define own locations. These locations will be used at first.
- If own location aren't defined for node groups with auto scale policy, locations for these groups will be automatically generated from master locations. If node groups list have more than three groups, locations for them will be assigned from the beggining of the master locations list. So, all node groups will be distributed in a range of master locations.
- Master locations will be used for fixed scale node groups.
- Auto repair and upgrade values will be used master\_auto\_upgrade value.
- Master maintenance windows will be used for Node groups also!
- Only one max\_expansion OR max\_unavailable values should be specified for the deployment policy.

Documentation - https://registry.terraform.io/providers/nebius-cloud/nebius/latest/docs/resources/kubernetes_node_group

Default values:
platform_id     = "standard-v3"
node_cores = 4
node_memory = 8
node_gpus = 0
core_fraction = 100
disk_type = "network-ssd"
disk_size = 32
preemptible = false
nat = false
auto_repair = true
auto_upgrade = true
maintenance_day = "monday"
maintenance_start_time = "20:00"
maintenance_duration = "3h30m"
network_acceleration_type = "standard"
container_runtime_type = "containerd"
Example:
node_groups = {
"yc-k8s-ng-01" = {
cluster_name = "k8s-kube-cluster"
description = "Kubernetes nodes group with fixed scale policy and one maintenance window"
fixed_scale = {
size = 3
}
labels = {
owner = "nebius"
service = "kubernetes"
}
node_labels = {
role = "worker-01"
environment = "dev"
}
},
"yc-k8s-ng-02" = {
description = "Kubernetes nodes group with auto scale policy"
auto_scale = {
min = 2
max = 4
initial = 2
}
node_locations = [
{
zone = "ru-central1-b"
subnet_id = "e2lu07tr481h35012c8p"
}
]
labels = {
owner = "example"
service = "kubernetes"
}
node_labels = {
role = "worker-02"
environment = "testing"
}
}
}
| `any` | `{}` | no | -| [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of common default values for Node groups. | `map(any)` |
{
"core_fraction": 100,
"disk_size": 32,
"disk_type": "network-ssd",
"ipv4": true,
"ipv6": false,
"nat": false,
"node_cores": 4,
"node_gpus": 0,
"node_memory": 8,
"platform_id": "standard-v3",
"preemptible": false
}
| no | +| [node\_groups](#input\_node\_groups) | Kubernetes node groups map of maps. It could contain all parameters of nebius\_kubernetes\_node\_group resource,
many of them could be NULL and have default values.

Notes:
- If node groups version isn't defined, the cluster version will be used instead.
- A master locations list must have only one location for zonal cluster and three locations for a regional.
- All node groups are able to define own locations. These locations will be used at first.
- If own locations aren't defined for node groups with auto scale policy, locations for these groups will be automatically generated from master locations. If the node groups list has more than three groups, locations for them will be assigned from the beginning of the master locations list. So, all node groups will be distributed in a range of master locations.
- Master locations will be used for fixed scale node groups.
- Auto repair and upgrade values will use the master\_auto\_upgrade value.
- Master maintenance windows will be used for Node groups also!
- Only one max\_expansion OR max\_unavailable values should be specified for the deployment policy.

Documentation - https://registry.terraform.io/providers/nebius-cloud/nebius/latest/docs/resources/kubernetes_node_group

Default values:
platform_id     = "standard-v2"
node_cores = 4
node_memory = 8
node_gpus = 0
core_fraction = 100
disk_type = "network-ssd"
disk_size = 32
preemptible = false
nat = false
auto_repair = true
auto_upgrade = true
maintenance_day = "monday"
maintenance_start_time = "20:00"
maintenance_duration = "3h30m"
network_acceleration_type = "standard"
container_runtime_type = "containerd"
Example:
node_groups = {
"yc-k8s-ng-01" = {
cluster_name = "k8s-kube-cluster"
description = "Kubernetes nodes group with fixed scale policy and one maintenance window"
fixed_scale = {
size = 3
}
labels = {
owner = "nebius"
service = "kubernetes"
}
node_labels = {
role = "worker-01"
environment = "dev"
}
},
"yc-k8s-ng-02" = {
description = "Kubernetes nodes group with auto scale policy"
auto_scale = {
min = 2
max = 4
initial = 2
}
node_locations = [
{
zone = "ru-central1-b"
subnet_id = "e2lu07tr481h35012c8p"
}
]
labels = {
owner = "example"
service = "kubernetes"
}
node_labels = {
role = "worker-02"
environment = "testing"
}
}
}
| `any` | `{}` | no | +| [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of common default values for Node groups. | `map(any)` |
{
"core_fraction": 100,
"disk_size": 32,
"disk_type": "network-ssd",
"ipv4": true,
"ipv6": false,
"nat": false,
"node_cores": 4,
"node_gpus": 0,
"node_memory": 8,
"platform_id": "standard-v2",
"preemptible": false
}
| no | | [node\_ipv4\_cidr\_mask\_size](#input\_node\_ipv4\_cidr\_mask\_size) | (Optional) Size of the masks that are assigned to each node in the cluster.
This efficiently limits the maximum number of pods for each node. | `number` | `24` | no | | [public\_access](#input\_public\_access) | Public or private Kubernetes cluster | `bool` | `true` | no | | [release\_channel](#input\_release\_channel) | Kubernetes cluster release channel name | `string` | `"REGULAR"` | no | @@ -288,8 +288,8 @@ No modules. | [network\_id](#input\_network\_id) | The ID of the cluster network. | `string` | n/a | yes | | [network\_policy\_provider](#input\_network\_policy\_provider) | Kubernetes cluster network policy provider | `string` | `"CALICO"` | no | | [node\_account\_name](#input\_node\_account\_name) | IAM node account name. | `string` | `"k8s-node-account"` | no | -| [node\_groups](#input\_node\_groups) | Kubernetes node groups map of maps. It could contain all parameters of nebius\_kubernetes\_node\_group resource,
many of them could be NULL and have default values.

Notes:
- If node groups version isn't defined, cluster version will be used instead of.
- A master locations list must have only one location for zonal cluster and three locations for a regional.
- All node groups are able to define own locations. These locations will be used at first.
- If own location aren't defined for node groups with auto scale policy, locations for these groups will be automatically generated from master locations. If node groups list have more than three groups, locations for them will be assigned from the beggining of the master locations list. So, all node groups will be distributed in a range of master locations.
- Master locations will be used for fixed scale node groups.
- Auto repair and upgrade values will be used master\_auto\_upgrade value.
- Master maintenance windows will be used for Node groups also!
- Only one max\_expansion OR max\_unavailable values should be specified for the deployment policy.

Documentation - https://registry.terraform.io/providers/nebius-cloud/nebius/latest/docs/resources/kubernetes_node_group

Default values:
platform_id     = "standard-v3"
node_cores = 4
node_memory = 8
node_gpus = 0
core_fraction = 100
disk_type = "network-ssd"
disk_size = 32
preemptible = false
nat = false
auto_repair = true
auto_upgrade = true
maintenance_day = "monday"
maintenance_start_time = "20:00"
maintenance_duration = "3h30m"
network_acceleration_type = "standard"
container_runtime_type = "containerd"
Example:
node_groups = {
"yc-k8s-ng-01" = {
cluster_name = "k8s-kube-cluster"
description = "Kubernetes nodes group with fixed scale policy and one maintenance window"
fixed_scale = {
size = 3
}
labels = {
owner = "nebius"
service = "kubernetes"
}
node_labels = {
role = "worker-01"
environment = "dev"
}
},
"yc-k8s-ng-02" = {
description = "Kubernetes nodes group with auto scale policy"
auto_scale = {
min = 2
max = 4
initial = 2
}
node_locations = [
{
zone = "ru-central1-b"
subnet_id = "e2lu07tr481h35012c8p"
}
]
labels = {
owner = "example"
service = "kubernetes"
}
node_labels = {
role = "worker-02"
environment = "testing"
}
}
}
| `any` | `{}` | no | -| [node\_groups\_defaults](#input\_node\_groups\_defaults) | A map of common default values for Node groups. | `map` |
{
"core_fraction": 100,
"disk_size": 32,
"disk_type": "network-ssd",
"ipv4": true,
"ipv6": false,
"nat": false,
"node_cores": 4,
"node_gpus": 0,
"node_memory": 8,
"platform_id": "standard-v3",
"preemptible": false
}
| no | +| [node\_groups](#input\_node\_groups) | Kubernetes node groups map of maps. It could contain all parameters of nebius\_kubernetes\_node\_group resource,
many of them could be NULL and have default values.

Notes:
- If node groups version isn't defined, the cluster version will be used instead.
- A master locations list must have only one location for zonal cluster and three locations for a regional.
- All node groups are able to define own locations. These locations will be used at first.
- If own locations aren't defined for node groups with auto scale policy, locations for these groups will be automatically generated from master locations. If the node groups list has more than three groups, locations for them will be assigned from the beginning of the master locations list. So, all node groups will be distributed in a range of master locations.
- Master locations will be used for fixed scale node groups.
- Auto repair and upgrade values will use the master\_auto\_upgrade value.
- Master maintenance windows will be used for Node groups also!
- Only one max\_expansion OR max\_unavailable values should be specified for the deployment policy.

Documentation - https://registry.terraform.io/providers/nebius-cloud/nebius/latest/docs/resources/kubernetes_node_group

Default values:
platform_id     = "standard-v2"
node_cores = 4
node_memory = 8
node_gpus = 0
core_fraction = 100
disk_type = "network-ssd"
disk_size = 32
preemptible = false
nat = false
auto_repair = true
auto_upgrade = true
maintenance_day = "monday"
maintenance_start_time = "20:00"
maintenance_duration = "3h30m"
network_acceleration_type = "standard"
container_runtime_type = "containerd"
Example:
node_groups = {
"yc-k8s-ng-01" = {
cluster_name = "k8s-kube-cluster"
description = "Kubernetes nodes group with fixed scale policy and one maintenance window"
fixed_scale = {
size = 3
}
labels = {
owner = "nebius"
service = "kubernetes"
}
node_labels = {
role = "worker-01"
environment = "dev"
}
},
"yc-k8s-ng-02" = {
description = "Kubernetes nodes group with auto scale policy"
auto_scale = {
min = 2
max = 4
initial = 2
}
node_locations = [
{
zone = "ru-central1-b"
subnet_id = "e2lu07tr481h35012c8p"
}
]
labels = {
owner = "example"
service = "kubernetes"
}
node_labels = {
role = "worker-02"
environment = "testing"
}
}
}
| `any` | `{}` | no | +| [node\_groups\_defaults](#input\_node\_groups\_defaults) | A map of common default values for Node groups. | `map` |
{
"core_fraction": 100,
"disk_size": 32,
"disk_type": "network-ssd",
"ipv4": true,
"ipv6": false,
"nat": false,
"node_cores": 4,
"node_gpus": 0,
"node_memory": 8,
"platform_id": "standard-v2",
"preemptible": false
}
| no | | [node\_ipv4\_cidr\_mask\_size](#input\_node\_ipv4\_cidr\_mask\_size) | (Optional) Size of the masks that are assigned to each node in the cluster.
Effectively limits maximum number of pods for each node. | `number` | `24` | no | | [public\_access](#input\_public\_access) | Public or private Kubernetes cluster | `bool` | `true` | no | | [release\_channel](#input\_release\_channel) | Kubernetes cluster release channel name | `string` | `"REGULAR"` | no | diff --git a/variables.tf b/variables.tf index baaddf4..354211a 100644 --- a/variables.tf +++ b/variables.tf @@ -233,7 +233,7 @@ variable "node_groups" { Default values: ``` - platform_id = "standard-v3" + platform_id = "standard-v2" node_cores = 4 node_memory = 8 node_gpus = 0 @@ -301,7 +301,7 @@ variable "node_groups_defaults" { description = "Map of common default values for Node groups." type = map(any) default = { - platform_id = "standard-v3" + platform_id = "standard-v2" node_cores = 4 node_memory = 8 node_gpus = 0